Skip to main content

alarms/
lib.rs

1// Copyright 2024 The Fuchsia Authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
5//! Alarm management subsystem.
6//!
7//! This subsystem serves the FIDL API `fuchsia.time.alarms/Wake`. To instantiate,
8//! you can use the following approach:
9//!
10//! ```ignore
11//! let proxy = client::connect_to_protocol::<ffhh::DeviceMarker>().map_err(
12//!    |e| error!("error: {}", e)).expect("add proper error handling");
13//!    let timer_loop = alarms::Handle::new(proxy);
14//! ```
15//!
16//! From here, use the standard approach with [ServiceFs::new] to expose the
17//! discoverable FIDL endpoint and call:
18//!
19//! ```ignore
20//! let stream: fidl_fuchsia_time_alarms::WakeRequestStream = ... ;
21//! alarms::serve(timer_loop, stream).await;
22//! // ...
23//! ```
24//!
25//! Of course, for everything to work well, your component will need appropriate
26//! capability routing.  Refer to capability routing docs for those details.
27
28mod emu;
29mod timers;
30
31use crate::emu::EmulationTimerOps;
32use anyhow::{Context, Result, anyhow};
33use async_trait::async_trait;
34use fidl::HandleBased;
35use fidl::encoding::ProxyChannelBox;
36use fidl::endpoints::RequestStream;
37use fidl_fuchsia_driver_token as fdt;
38use fidl_fuchsia_hardware_hrtimer as ffhh;
39use fidl_fuchsia_time_alarms as fta;
40use fuchsia_async as fasync;
41use fuchsia_component::client::connect_to_named_protocol_at_dir_root;
42use fuchsia_inspect as finspect;
43use fuchsia_inspect::{IntProperty, NumericProperty, Property};
44use fuchsia_runtime as fxr;
45use fuchsia_trace as trace;
46use futures::channel::mpsc;
47use futures::sink::SinkExt;
48use futures::{StreamExt, TryStreamExt};
49use log::{debug, error, warn};
50use scopeguard::defer;
51use std::cell::RefCell;
52use std::rc::Rc;
53use std::sync::LazyLock;
54use time_pretty::{MSEC_IN_NANOS, format_duration, format_timer};
55use zx::AsHandleRef;
56
// Driver node token used to request stack traces from the hrtimer driver.
// Initialized at most once, from the token returned by `get_properties`.
static DEBUG_STACK_TRACE_TOKEN: std::sync::OnceLock<zx::Event> = std::sync::OnceLock::new();
// `i64::MAX` widened to `u64`; computed lazily so the conversion is checked.
static I64_MAX_AS_U64: LazyLock<u64> = LazyLock::new(|| i64::MAX.try_into().expect("infallible"));
// `i32::MAX` widened to `u64`; used as the ceiling for timer tick counts.
static I32_MAX_AS_U64: LazyLock<u64> = LazyLock::new(|| i32::MAX.try_into().expect("infallible"));

/// The largest value of timer "ticks" that is still considered useful.
static MAX_USEFUL_TICKS: LazyLock<u64> = LazyLock::new(|| *I32_MAX_AS_U64);

/// The smallest value of "ticks" that we can program into the driver. To wit,
/// driver will reject "0" ticks, even though it probably shouldn't. See
/// for details: b/437177931.
static MIN_USEFUL_TICKS: u64 = 1;

/// The hrtimer ID used for scheduling wake alarms.  This ID is reused from
/// Starnix, and should eventually no longer be critical.
const MAIN_TIMER_ID: usize = 6;

/// This is what we consider a "long" delay in alarm operations.
const LONG_DELAY_NANOS: i64 = 2000 * MSEC_IN_NANOS;

// Timeout after which `log_long_op!` starts warning about a blocked operation.
const TIMEOUT_SECONDS: i64 = 40;
77
78async fn request_stack_trace() {
79    if let Some(ev) = DEBUG_STACK_TRACE_TOKEN.get() {
80        log::warn!("*** DRIVER STACK TRACE REQUESTED: expect a driver stack trace below.");
81        let ev_dup = clone_handle(ev);
82        let debug_proxy = fuchsia_component::client::connect_to_protocol::<fdt::DebugMarker>();
83        match debug_proxy {
84            Ok(proxy) => {
85                if let Err(e) = proxy.log_stack_trace(ev_dup).await {
86                    log::warn!("failed to log stack trace: {:?}", e);
87                }
88            }
89            Err(e) => {
90                log::warn!("failed to connect to Debug protocol: {:?}", e);
91            }
92        }
93    } else {
94        log::warn!("DEBUG_STACK_TRACE_TOKEN not initialized, cannot log stack trace");
95    }
96}
97
/// Waits on a future, but if the future takes longer than `TIMEOUT_SECONDS`, we log a warning and
/// a stack trace. A stack trace is requested at most once for each call.
macro_rules! log_long_op {
    ($fut:expr) => {{
        use futures::FutureExt;
        let fut = $fut;
        futures::pin_mut!(fut);
        // Whether we have already warned (and requested a stack trace) for this call.
        let mut logged = false;
        loop {
            // A fresh timeout is armed on every loop iteration, so the warning
            // repeats every `TIMEOUT_SECONDS` while the operation stays blocked.
            let timeout = fasync::Timer::new(zx::MonotonicDuration::from_seconds(TIMEOUT_SECONDS));
            futures::select! {
                res = fut.as_mut().fuse() => {
                    if logged {
                        log::warn!("unexpected blocking is now resolved: long-running async operation at {}:{}.",
                            file!(), line!());
                    }
                    break res;
                }
                _ = timeout.fuse() => {
                    // Check logs for a `kBadState` status reported from the hrtimer driver.
                    log::warn!("unexpected blocking: long-running async op at {}:{}. Report to `componentId:1408151`",
                        file!(), line!());
                    if !logged {
                        #[cfg(all(target_os = "fuchsia", not(doc)))]
                        ::debug::backtrace_request_all_threads();
                        // Ask the driver for its own stack trace, detached so we keep polling.
                        fasync::Task::local(request_stack_trace()).detach();
                    }
                    logged = true;
                }
            }
        }
    }};
}
131
132/// Increments the value of an underlying inspect property during its lifetime.
133struct ScopedInc<'a> {
134    property: &'a IntProperty,
135}
136
137impl<'a> ScopedInc<'a> {
138    fn new(property: &'a IntProperty) -> Self {
139        property.add(1);
140        Self { property }
141    }
142}
143
144impl<'a> Drop for ScopedInc<'a> {
145    fn drop(&mut self) {
146        self.property.add(-1);
147    }
148}
149
150/// Compares two optional deadlines and returns true if the `before is different from `after.
151/// Nones compare as equal.
152fn is_deadline_changed(
153    before: Option<fasync::BootInstant>,
154    after: Option<fasync::BootInstant>,
155) -> bool {
156    match (before, after) {
157        (None, None) => false,
158        (None, Some(_)) | (Some(_), None) => true,
159        (Some(before), Some(after)) => before != after,
160    }
161}
162
/// Errors returnable from [TimerOps] calls.
#[derive(Debug, Clone)]
pub(crate) enum TimerOpsError {
    /// The driver reported an error.
    Driver(ffhh::DriverError),
    /// FIDL-specific RPC error.
    Fidl(fidl::Error),
}
171
172impl Into<fta::WakeAlarmsError> for TimerOpsError {
173    /// Compute the error that gets propagated to callers, depending on messages
174    /// from the driver.
175    fn into(self) -> fta::WakeAlarmsError {
176        match self {
177            TimerOpsError::Fidl(fidl::Error::ClientChannelClosed { .. }) => {
178                fta::WakeAlarmsError::DriverConnection
179            }
180            TimerOpsError::Driver(ffhh::DriverError::InternalError) => fta::WakeAlarmsError::Driver,
181            _ => fta::WakeAlarmsError::Internal,
182        }
183    }
184}
185
186impl TimerOpsError {
187    fn is_canceled(&self) -> bool {
188        match self {
189            TimerOpsError::Driver(ffhh::DriverError::Canceled) => true,
190            _ => false,
191        }
192    }
193}
194
/// Marker trait naming the future returned by [TimerOps::start_and_wait],
/// so implementations can return a nameable `Box<dyn SawResponseFut>`.
trait SawResponseFut: std::future::Future<Output = Result<zx::EventPair, TimerOpsError>> {
    // nop
}
198
/// Abstracts away timer operations.
#[async_trait(?Send)]
pub(crate) trait TimerOps {
    /// Stop the timer with the specified ID.
    async fn stop(&self, id: u64);

    /// Examine the timer's properties, such as supported resolutions and tick
    /// counts.
    async fn get_timer_properties(&self) -> TimerConfig;

    /// Starts timer `id` with the given `resolution` and `ticks`; the returned
    /// future resolves with the driver's response (a keep-alive [zx::EventPair])
    /// or a [TimerOpsError].
    ///
    /// This method must return an actual future, to handle the borrow checker:
    /// making this async will assume that `self` remains borrowed, which will
    /// thwart attempts to move the return value of this call into a separate
    /// closure.
    fn start_and_wait(
        &self,
        id: u64,
        resolution: &ffhh::Resolution,
        ticks: u64,
        setup_event: zx::Event,
    ) -> std::pin::Pin<Box<dyn SawResponseFut>>;
}
221
/// TimerOps backed by an actual hardware timer.
struct HardwareTimerOps {
    // FIDL proxy to the hrtimer driver device.
    proxy: ffhh::DeviceProxy,
}

impl HardwareTimerOps {
    /// Wraps the hrtimer device `proxy` into a boxed [HardwareTimerOps].
    fn new(proxy: ffhh::DeviceProxy) -> Box<Self> {
        Box::new(Self { proxy })
    }
}
232
#[async_trait(?Send)]
impl TimerOps for HardwareTimerOps {
    /// Stops timer `id` on the driver; FIDL transport errors and
    /// driver-reported errors are logged but otherwise ignored.
    async fn stop(&self, id: u64) {
        let _ = self
            .proxy
            .stop(id)
            .await
            .map(|result| {
                let _ = result.map_err(|e| warn!("stop_hrtimer: driver error: {:?}", e));
            })
            .map_err(|e| warn!("stop_hrtimer: could not stop prior timer: {}", e));
    }

    /// Queries the driver for its timer properties and distills them into a
    /// [TimerConfig]; returns an empty config if the driver has no usable timers
    /// or the query fails.
    async fn get_timer_properties(&self) -> TimerConfig {
        match log_long_op!(self.proxy.get_properties()) {
            Ok(p) => {
                // Stash the driver node token (if provided) so stack traces
                // can later be requested via `request_stack_trace`.
                if let Some(token) = p.driver_node_token {
                    let _ = DEBUG_STACK_TRACE_TOKEN.set(token);
                }
                let timers_properties = &p.timers_properties.expect("timers_properties must exist");
                debug!("get_timer_properties: got: {:?}", timers_properties);

                // Pick the correct hrtimer to use for wakes.
                let timer_index = if timers_properties.len() > MAIN_TIMER_ID {
                    // Mostly vim3, where we have pre-existing timer allocations
                    // that we don't need to change.
                    MAIN_TIMER_ID
                } else if timers_properties.len() > 0 {
                    // Newer devices that don't need to allocate timer IDs, and/or
                    // may not even have as many timers as vim3 does. But, at least
                    // one timer is needed.
                    0
                } else {
                    // Give up.
                    return TimerConfig::new_empty();
                };
                let main_timer_properties = &timers_properties[timer_index];
                debug!("alarms: main_timer_properties: {:?}", main_timer_properties);
                // Not sure whether it is useful to have more ticks than this, so limit it.
                let max_ticks: u64 = std::cmp::min(
                    main_timer_properties.max_ticks.unwrap_or(*MAX_USEFUL_TICKS),
                    *MAX_USEFUL_TICKS,
                );
                let resolutions = &main_timer_properties
                    .supported_resolutions
                    .as_ref()
                    .expect("supported_resolutions is populated")
                    .iter()
                    .last() //  Limits the resolution to the coarsest available.
                    .map(|r| match *r {
                        ffhh::Resolution::Duration(d) => d,
                        _ => {
                            error!(
                            "get_timer_properties: Unknown resolution type, returning millisecond."
                        );
                            MSEC_IN_NANOS
                        }
                    })
                    .map(|d| zx::BootDuration::from_nanos(d))
                    .into_iter() // Used with .last() above.
                    .collect::<Vec<_>>();
                let timer_id = main_timer_properties.id.expect("timer ID is always present");
                TimerConfig::new_from_data(timer_id, resolutions, max_ticks)
            }
            Err(e) => {
                error!("could not get timer properties: {:?}", e);
                TimerConfig::new_empty()
            }
        }
    }

    fn start_and_wait(
        &self,
        id: u64,
        resolution: &ffhh::Resolution,
        ticks: u64,
        setup_event: zx::Event,
    ) -> std::pin::Pin<Box<dyn SawResponseFut>> {
        // Issue the FIDL call now; wrap the response future so the caller can
        // move it around without keeping `self` borrowed.
        let inner = self.proxy.start_and_wait(id, resolution, ticks, setup_event);
        Box::pin(HwResponseFut { pinner: Box::pin(inner) })
    }
}
315
// Untangles the borrow checker issues that otherwise result from making
// TimerOps::start_and_wait an async function.
struct HwResponseFut {
    // The in-flight FIDL response future of `DeviceProxy::start_and_wait`,
    // boxed and pinned so it can be polled from this wrapper's `poll`.
    pinner: std::pin::Pin<
        Box<
            fidl::client::QueryResponseFut<
                ffhh::DeviceStartAndWaitResult,
                fidl::encoding::DefaultFuchsiaResourceDialect,
            >,
        >,
    >,
}
328
329use std::task::Poll;
330impl SawResponseFut for HwResponseFut {}
331impl std::future::Future for HwResponseFut {
332    type Output = Result<zx::EventPair, TimerOpsError>;
333    fn poll(
334        mut self: std::pin::Pin<&mut Self>,
335        cx: &mut std::task::Context<'_>,
336    ) -> std::task::Poll<Self::Output> {
337        let inner_poll = self.pinner.as_mut().poll(cx);
338        match inner_poll {
339            Poll::Ready(result) => Poll::Ready(match result {
340                Ok(Ok(keep_alive)) => Ok(keep_alive),
341                Ok(Err(e)) => Err(TimerOpsError::Driver(e)),
342                Err(e) => Err(TimerOpsError::Fidl(e)),
343            }),
344            Poll::Pending => Poll::Pending,
345        }
346    }
347}
348
349/// Stops a currently running hardware timer.
350async fn stop_hrtimer(hrtimer: &Box<dyn TimerOps>, timer_config: &TimerConfig) {
351    trace::duration!("alarms", "hrtimer:stop", "id" => timer_config.id);
352    debug!("stop_hrtimer: stopping hardware timer: {}", timer_config.id);
353    hrtimer.stop(timer_config.id).await;
354    debug!("stop_hrtimer: stopped  hardware timer: {}", timer_config.id);
355}
356
// The default size of the channels created in this module, counted in
// messages (not bytes).
// This is very unlikely to create bottlenecks.
const CHANNEL_SIZE: usize = 1000;
360
/// A type handed around between the concurrent loops run by this module.
#[derive(Debug)]
enum Cmd {
    /// Request a timer to be started.
    Start {
        /// The unique connection ID.
        conn_id: zx::Koid,
        /// A timestamp (presumably in the future), at which to expire the timer.
        deadline: timers::Deadline,
        // The API supports several modes. See fuchsia.time.alarms/Wake.fidl.
        //
        // Optional, because not always needed:
        //
        // * `mode` is required for hanging get API calls (e.g. `StartAndWait`), as we must signal
        //   when the alarm is scheduled.
        // * The calls such as `SetUtc` which return only upon scheduling do not need a `mode`, as
        //   the caller can wait for the call to return immediately.
        mode: Option<fta::SetMode>,
        /// An alarm identifier, chosen by the caller.
        alarm_id: String,
        /// A responder that will be called when the timer expires. The
        /// client end of the connection will block until we send something
        /// on this responder.
        ///
        /// This is packaged into a Rc... only because both the "happy path"
        /// and the error path must consume the responder.  This allows them
        /// to be consumed, without the responder needing to implement Default.
        responder: Rc<dyn timers::Responder>,
    },
    /// Request cancellation of the timer identified by `timer_id`;
    /// `done` is signaled once the stop has been processed.
    StopById {
        done: zx::Event,
        timer_id: timers::Id,
    },
    /// A timer expired; `keep_alive` carries the wake lease for the expiry.
    Alarm {
        expired_deadline: fasync::BootInstant,
        keep_alive: fidl::EventPair,
    },
    /// A timer expired with a FIDL transport error; note: no wake lease.
    AlarmFidlError {
        expired_deadline: fasync::BootInstant,
        error: fidl::Error,
    },
    /// A timer expired with a driver-reported error; note: no wake lease.
    AlarmDriverError {
        expired_deadline: fasync::BootInstant,
        error: ffhh::DriverError,

        // Added these for debugging details, otherwise not necessary.
        timer_config_id: u64,
        resolution_nanos: i64,
        ticks: u64,
    },
    /// The UTC clock transformation has been updated.
    UtcUpdated {
        // The new boot-to-utc clock transformation.
        transform: fxr::UtcClockTransform,
    },
}
417
418impl std::fmt::Display for Cmd {
419    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
420        match self {
421            Cmd::Start { conn_id, deadline, alarm_id, .. } => {
422                write!(
423                    f,
424                    "Start[alarm_id=\"{}\", conn_id={:?}, deadline={}]",
425                    alarm_id, conn_id, deadline,
426                )
427            }
428            Cmd::Alarm { expired_deadline, .. } => {
429                write!(f, "Alarm[deadline={}]", format_timer((*expired_deadline).into()))
430            }
431            Cmd::AlarmFidlError { expired_deadline, error } => {
432                write!(
433                    f,
434                    "FIDLError[deadline={}, err={}, NO_WAKE_LEASE!]",
435                    format_timer((*expired_deadline).into()),
436                    error
437                )
438            }
439            Cmd::AlarmDriverError { expired_deadline, error, .. } => {
440                write!(
441                    f,
442                    "DriverError[deadline={}, err={:?}, NO_WAKE_LEASE!]",
443                    format_timer((*expired_deadline).into()),
444                    error
445                )
446            }
447            Cmd::StopById { timer_id, done: _ } => {
448                write!(f, "StopById[timerId={}]", timer_id,)
449            }
450            Cmd::UtcUpdated { transform } => {
451                write!(f, "UtcUpdated[timerId={transform:?}]")
452            }
453        }
454    }
455}
456
457/// Extracts a KOID from the underlying channel of the provided stream.
458///
459/// This function deconstructs the provided stream to access the underlying
460/// channel and extract its KOID. It then reconstructs the stream and returns
461/// it to the caller along with the KOID.
462///
463/// # Args
464/// - `stream`: The `fta::WakeAlarmsRequestStream` to extract the KOID from.
465///
466/// # Returns
467/// A tuple containing the `zx::Koid` of the stream's channel and the
468/// reconstructed `fta::WakeAlarmsRequestStream`.
469pub fn get_stream_koid(
470    stream: fta::WakeAlarmsRequestStream,
471) -> (zx::Koid, fta::WakeAlarmsRequestStream) {
472    let (inner, is_terminated) = stream.into_inner();
473    let koid = inner.channel().as_channel().as_handle_ref().koid().expect("infallible");
474    let stream = fta::WakeAlarmsRequestStream::from_inner(inner, is_terminated);
475    (koid, stream)
476}
477
478/// Serves a single Wake API client.
479///
480/// This function processes incoming requests from a `fta::WakeAlarmsRequestStream`,
481/// handling each request by calling `handle_request`. It continues to process
482/// requests until the stream is exhausted.
483///
484/// # Args
485/// - `timer_loop`: A reference-counted pointer to the `Loop` that manages timers.
486/// - `requests`: The stream of incoming `fta::WakeAlarmsRequest` from a client.
487pub async fn serve(timer_loop: Rc<Loop>, requests: fta::WakeAlarmsRequestStream) {
488    let timer_loop = timer_loop.clone();
489    let timer_loop_send = || timer_loop.get_sender();
490    let (conn_id, mut requests) = get_stream_koid(requests);
491    let mut request_count = 0;
492    debug!("alarms::serve: opened connection: {:?}", conn_id);
493    while let Some(maybe_request) = requests.next().await {
494        request_count += 1;
495        debug!("alarms::serve: conn_id: {:?} incoming request: {}", conn_id, request_count);
496        match maybe_request {
497            Ok(request) => {
498                // Should return quickly.
499                handle_request(conn_id, timer_loop_send(), request).await;
500            }
501            Err(e) => {
502                warn!("alarms::serve: error in request: {:?}", e);
503            }
504        }
505        debug!("alarms::serve: conn_id: {:?} done request: {}", conn_id, request_count);
506    }
507    // Check if connection closure was intentional. It is way too easy to close
508    // a FIDL connection inadvertently if doing non-mainstream things with FIDL.
509    warn!("alarms::serve: CLOSED CONNECTION: conn_id: {:?}", conn_id);
510}
511
512async fn handle_cancel(alarm_id: String, conn_id: zx::Koid, cmd: &mut mpsc::Sender<Cmd>) {
513    let done = zx::Event::create();
514    let timer_id = timers::Id::new(alarm_id.clone(), conn_id);
515    if let Err(e) = cmd.send(Cmd::StopById { timer_id, done: clone_handle(&done) }).await {
516        warn!("handle_request: error while trying to cancel: {}: {:?}", alarm_id, e);
517    }
518    wait_signaled(&done).await;
519}
520
/// Processes a single Wake API request from a single client.
/// This function is expected to return quickly.
///
/// # Args
/// - `conn_id`: the unique identifier of the connection producing these requests.
/// - `cmd`: the outbound queue of commands to deliver to the timer manager.
/// - `request`: a single inbound Wake FIDL API request.
async fn handle_request(
    conn_id: zx::Koid,
    mut cmd: mpsc::Sender<Cmd>,
    request: fta::WakeAlarmsRequest,
) {
    match request {
        fta::WakeAlarmsRequest::SetAndWait { deadline, mode, alarm_id, responder } => {
            // Since responder is consumed by the happy path and the error path, but not both,
            // and because the responder does not implement Default, this is a way to
            // send it in two mutually exclusive directions.  Each direction will reverse
            // this wrapping once the responder makes it to the other side.
            //
            // Rc required because of sharing a noncopyable struct; RefCell required because
            // borrow_mut() is needed to move out; and Option is required so we can
            // use take() to replace the struct with None so it does not need to leave
            // a Default in its place.
            let responder = Rc::new(RefCell::new(Some(responder)));

            // Alarm is not scheduled yet!
            debug!(
                "handle_request: scheduling alarm_id: \"{}\"\n\tconn_id: {:?}\n\tdeadline: {}",
                alarm_id,
                conn_id,
                format_timer(deadline.into())
            );
            // Expected to return quickly.
            let deadline = timers::Deadline::Boot(deadline.into());
            if let Err(e) = log_long_op!(cmd.send(Cmd::Start {
                conn_id,
                deadline,
                mode: Some(mode),
                alarm_id: alarm_id.clone(),
                responder: responder.clone(),
            })) {
                warn!("handle_request: error while trying to schedule `{}`: {:?}", alarm_id, e);
                // The send failed, so the Start command (and the responder clone
                // inside it) never reached the timer loop; unwrap and fail the call.
                responder
                    .borrow_mut()
                    .take()
                    .expect("always present if call fails")
                    .send(Err(fta::WakeAlarmsError::Internal))
                    .unwrap();
            }
        }
        fta::WakeAlarmsRequest::SetAndWaitUtc { deadline, mode, alarm_id, responder } => {
            // Quickly get rid of the custom wake alarms deadline type.
            let deadline =
                timers::Deadline::Utc(fxr::UtcInstant::from_nanos(deadline.timestamp_utc));

            // The rest of this match branch is the same as for `SetAndWait`. However, the handling
            // is for now simple enough that we don't need to explore factoring common actions out.
            let responder = Rc::new(RefCell::new(Some(responder)));
            debug!(
                "handle_request: scheduling alarm_id UTC: \"{alarm_id}\"\n\tconn_id: {conn_id:?}\n\tdeadline: {deadline}",
            );

            if let Err(e) = log_long_op!(cmd.send(Cmd::Start {
                conn_id,
                deadline,
                mode: Some(mode),
                alarm_id: alarm_id.clone(),
                responder: responder.clone(),
            })) {
                warn!("handle_request: error while trying to schedule `{}`: {:?}", alarm_id, e);
                responder
                    .borrow_mut()
                    .take()
                    .expect("always present if call fails")
                    .send(Err(fta::WakeAlarmsError::Internal))
                    .unwrap();
            }
        }
        fta::WakeAlarmsRequest::Cancel { alarm_id, .. } => {
            // TODO: b/383062441 - make this into an async task so that we wait
            // less to schedule the next alarm.
            log_long_op!(handle_cancel(alarm_id, conn_id, &mut cmd));
        }
        fta::WakeAlarmsRequest::Set { notifier, deadline, mode, alarm_id, responder } => {
            // Alarm is not scheduled yet!
            debug!(
                "handle_request: scheduling alarm_id: \"{alarm_id}\"\n\tconn_id: {conn_id:?}\n\tdeadline: {}",
                format_timer(deadline.into())
            );
            // Expected to return quickly.
            //
            // Unlike `SetAndWait`, the `notifier` travels to the timer loop for
            // the eventual expiry, while `responder` only acknowledges scheduling.
            if let Err(e) = log_long_op!(cmd.send(Cmd::Start {
                conn_id,
                deadline: timers::Deadline::Boot(deadline.into()),
                mode: Some(mode),
                alarm_id: alarm_id.clone(),
                responder: Rc::new(RefCell::new(Some(notifier))),
            })) {
                warn!("handle_request: error while trying to schedule `{}`: {:?}", alarm_id, e);
                responder.send(Err(fta::WakeAlarmsError::Internal)).unwrap();
            } else {
                // Successfully scheduled the alarm.
                responder.send(Ok(())).unwrap();
            }
        }
        fta::WakeAlarmsRequest::_UnknownMethod { .. } => {}
    };
}
628
/// Represents a single alarm event processing loop.
///
/// One instance is created per each alarm-capable low-level device. The `Loop`
/// is responsible for managing the lifecycle of wake alarms, including their
/// creation, scheduling, and cancellation. It interacts with the underlying
/// hardware timer through a `TimerOps` trait object.
pub struct Loop {
    // Given to any clients that need to send messages to the background
    // timer-loop task, via [Loop::get_sender].
    snd: mpsc::Sender<Cmd>,
}
640
impl Loop {
    /// Creates a new instance of `Loop`.
    ///
    /// This function initializes a new `Loop` with a connection to a low-level
    /// hardware timer device. It spawns two background tasks: one for the main
    /// timer event loop and another for monitoring UTC clock changes.
    ///
    /// # Args
    /// - `scope`: The `fasync::ScopeHandle` to spawn background tasks in.
    /// - `device_proxy`: A `ffhh::DeviceProxy` for communicating with the hardware timer.
    /// - `inspect`: A `finspect::Node` for recording diagnostics.
    /// - `utc_clock`: A `fxr::UtcClock` for tracking UTC time.
    ///
    /// # Returns
    /// A new instance of `Loop`.
    pub fn new(
        scope: fasync::ScopeHandle,
        device_proxy: ffhh::DeviceProxy,
        inspect: finspect::Node,
        utc_clock: fxr::UtcClock,
    ) -> Self {
        let hw_device_timer_ops = HardwareTimerOps::new(device_proxy);
        Loop::new_internal(scope, hw_device_timer_ops, inspect, utc_clock)
    }

    /// Creates a new instance of `Loop` with emulated wake alarms.
    ///
    /// This function is similar to `new`, but it uses an emulated timer instead
    /// of a real hardware timer. This is useful for testing environments where
    /// a hardware timer may not be available.
    ///
    /// # Args
    /// - `scope`: The `fasync::ScopeHandle` to spawn background tasks in.
    /// - `inspect`: A `finspect::Node` for recording diagnostics.
    /// - `utc_clock`: A `fxr::UtcClock` for tracking UTC time.
    ///
    /// # Returns
    /// A new instance of `Loop` with an emulated timer.
    pub fn new_emulated(
        scope: fasync::ScopeHandle,
        inspect: finspect::Node,
        utc_clock: fxr::UtcClock,
    ) -> Self {
        let timer_ops = Box::new(EmulationTimerOps::new());
        Loop::new_internal(scope, timer_ops, inspect, utc_clock)
    }

    // Shared constructor: seeds the boot-to-UTC transform, wires the command
    // channel, and spawns the timer loop and the UTC clock monitor tasks.
    fn new_internal(
        scope: fasync::ScopeHandle,
        timer_ops: Box<dyn TimerOps>,
        inspect: finspect::Node,
        utc_clock: fxr::UtcClock,
    ) -> Self {
        // Initial transform; later refreshed by `monitor_utc_clock_changes`.
        let utc_transform = Rc::new(RefCell::new(
            utc_clock.get_details().expect("has UTC clock READ capability").reference_to_synthetic,
        ));

        let (snd, rcv) = mpsc::channel(CHANNEL_SIZE);
        let loop_scope = scope.clone();

        // The main timer loop gets its own sender so it can enqueue follow-ups.
        scope.spawn_local(wake_timer_loop(
            loop_scope,
            snd.clone(),
            rcv,
            timer_ops,
            inspect,
            utc_transform,
        ));
        scope.spawn_local(monitor_utc_clock_changes(utc_clock, snd.clone()));
        Self { snd }
    }

    /// Gets a copy of a channel through which async commands may be sent to
    /// the [Loop].
    fn get_sender(&self) -> mpsc::Sender<Cmd> {
        self.snd.clone()
    }
}
719
720// Forwards the clock transformation of an updated clock into the alarm manager, to allow
721// correcting the boot time deadlines of clocks on the UTC timeline.
722async fn monitor_utc_clock_changes(utc_clock: fxr::UtcClock, mut cmd: mpsc::Sender<Cmd>) {
723    let koid = utc_clock.as_handle_ref().koid();
724    log::info!("monitor_utc_clock_changes: entry");
725    loop {
726        // CLOCK_UPDATED signal is self-clearing.
727        fasync::OnSignals::new(utc_clock.as_handle_ref(), zx::Signals::CLOCK_UPDATED)
728            .await
729            .expect("UTC clock is readable");
730
731        let transform =
732            utc_clock.get_details().expect("UTC clock details are readable").reference_to_synthetic;
733        log::debug!("Received a UTC update: koid={koid:?}: {transform:?}");
734        if let Err(err) = cmd.send(Cmd::UtcUpdated { transform }).await {
735            // This is OK in tests.
736            log::warn!("monitor_utc_clock_changes: exit: {err:?}");
737            break;
738        }
739    }
740}
741
/// Clones a handle infallibly with `zx::Rights::SAME_RIGHTS`.
///
/// This function duplicates a handle, preserving its rights. It will panic if
/// the handle duplication fails, which is not expected to happen under normal
/// circumstances.
///
/// # Args
/// - `handle`: A reference to a handle-based object to be cloned.
///
/// # Returns
/// A new handle with the same rights as the original.
pub fn clone_handle<H: HandleBased>(handle: &H) -> H {
    handle.duplicate_handle(zx::Rights::SAME_RIGHTS).expect("infallible")
}
756
/// Suspends until `handle` has the `EVENT_SIGNALED` signal asserted on it.
async fn wait_signaled<H: HandleBased>(handle: &H) {
    fasync::OnSignals::new(handle, zx::Signals::EVENT_SIGNALED).await.expect("infallible");
}
760
/// Asserts `EVENT_SIGNALED` on `event`, waking any [wait_signaled] waiters.
pub(crate) fn signal(event: &zx::Event) {
    event.signal(zx::Signals::NONE, zx::Signals::EVENT_SIGNALED).expect("infallible");
}
764
/// A [TimerDuration] represents a duration of time that can be expressed by
/// a discrete timer register.
///
/// This is a low-level representation of time duration, used in interaction with
/// hardware devices. It is therefore necessarily discretized, with adaptive
/// resolution, depending on the physical characteristics of the underlying
/// hardware timer that it models.
///
/// The duration modeled is `resolution * ticks`.
#[derive(Debug, Clone, Copy)]
struct TimerDuration {
    // The resolution of each one of the `ticks` below.
    resolution: zx::BootDuration,
    // The number of ticks that encodes time duration. Each "tick" represents
    // one unit of `resolution` above.
    ticks: u64,
}
780
/// This and the comparison traits below are used to allow TimerDuration
/// calculations in a compact form.
///
/// Equality and ordering are both delegated to [Ord::cmp] below, which compares
/// total durations; `Eq` is sound here because that comparison is a total order.
impl Eq for TimerDuration {}

impl std::cmp::PartialOrd for TimerDuration {
    /// Delegates to the total ordering defined by [Ord::cmp].
    fn partial_cmp(&self, other: &TimerDuration) -> Option<std::cmp::Ordering> {
        Some(self.cmp(other))
    }
}

impl std::cmp::PartialEq for TimerDuration {
    /// Two values are equal when [Ord::cmp] reports them equal, i.e. they model
    /// the same total duration regardless of resolution.
    fn eq(&self, other: &Self) -> bool {
        self.cmp(other) == std::cmp::Ordering::Equal
    }
}
796
797impl std::cmp::Ord for TimerDuration {
798    /// Two [TimerDuration]s compare equal if they model exactly the same duration of time,
799    /// no matter the resolutions.
800    fn cmp(&self, other: &TimerDuration) -> std::cmp::Ordering {
801        let self_ticks_128: i128 = self.ticks as i128;
802        let self_resolution: i128 = self.resolution_as_nanos() as i128;
803        let self_nanos = self_resolution * self_ticks_128;
804
805        let other_ticks_128: i128 = other.ticks as i128;
806        let other_resolution: i128 = other.resolution_as_nanos() as i128;
807        let other_nanos = other_resolution * other_ticks_128;
808
809        self_nanos.cmp(&other_nanos)
810    }
811}
812
813impl std::fmt::Display for TimerDuration {
814    /// Human readable TimerDuration exposes both the tick count and the resolution,
815    /// in the format of "ticks x resolution", with an end result of
816    /// `10x5ms` for example.
817    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
818        let ticks = self.ticks;
819        let resolution = self.resolution();
820        // Example: 10x1ms
821        write!(f, "{}x{}", ticks, format_duration(resolution),)
822    }
823}
824
impl TimerDuration {
    /// The maximum representable TimerDuration that we allow.
    ///
    /// Expressed as `i64::MAX` ticks of 1 ns each, i.e. the largest duration a
    /// `zx::BootDuration` (an `i64` nanosecond count) can hold.
    fn max() -> Self {
        TimerDuration::new(zx::BootDuration::from_nanos(1), *I64_MAX_AS_U64)
    }

    /// The zero [TimerDuration].
    fn zero() -> Self {
        TimerDuration::new(zx::BootDuration::from_nanos(1), 0)
    }

    /// Creates a new timer duration with the given parameters.
    fn new(resolution: zx::BootDuration, ticks: u64) -> Self {
        Self { resolution, ticks }
    }

    /// Creates a new timer duration using the resolution from `res_source` and
    /// a specified number of ticks.
    fn new_with_resolution(res_source: &TimerDuration, ticks: u64) -> Self {
        Self::new(res_source.resolution, ticks)
    }

    /// Returns the time duration represented by this TimerDuration.
    ///
    /// Due to the way duration is expressed, the same time duration
    /// can be represented in multiple ways.
    fn duration(&self) -> zx::BootDuration {
        let duration_as_nanos = self.resolution_as_nanos() * self.ticks;
        // NOTE(review): this clamps to I32_MAX_AS_U64, not I64_MAX_AS_U64 as
        // `max()` above would suggest; if that constant is i32::MAX nanoseconds
        // (~2.1s) long deadlines would be truncated — confirm the constant's
        // definition and intent.
        let clamp_duration = std::cmp::min(*I32_MAX_AS_U64, duration_as_nanos);
        zx::BootDuration::from_nanos(clamp_duration.try_into().expect("result was clamped"))
    }

    /// The resolution of this TimerDuration
    fn resolution(&self) -> zx::BootDuration {
        self.resolution
    }

    /// The resolution in whole nanoseconds. Panics if the resolution were
    /// negative, which constructors do not produce.
    fn resolution_as_nanos(&self) -> u64 {
        self.resolution().into_nanos().try_into().expect("resolution is never negative")
    }

    /// The number of ticks of this [TimerDuration].
    fn ticks(&self) -> u64 {
        self.ticks
    }
}
871
872impl From<zx::BootDuration> for TimerDuration {
873    fn from(d: zx::BootDuration) -> TimerDuration {
874        let nanos = d.into_nanos();
875        assert!(nanos >= 0);
876        let nanos_u64 = nanos.try_into().expect("guarded by assert");
877        TimerDuration::new(zx::BootDuration::from_nanos(1), nanos_u64)
878    }
879}
880
881impl std::ops::Div for TimerDuration {
882    type Output = u64;
883    fn div(self, rhs: Self) -> Self::Output {
884        let self_nanos = self.resolution_as_nanos() * self.ticks;
885        let rhs_nanos = rhs.resolution_as_nanos() * rhs.ticks;
886        self_nanos / rhs_nanos
887    }
888}
889
890impl std::ops::Mul<u64> for TimerDuration {
891    type Output = Self;
892    fn mul(self, rhs: u64) -> Self::Output {
893        Self::new(self.resolution, self.ticks * rhs)
894    }
895}
896
/// Contains the configuration of a specific timer.
#[derive(Debug)]
pub(crate) struct TimerConfig {
    /// The resolutions supported by this timer. Each entry is one possible
    /// duration for one timer "tick".  The resolution is picked when a timer
    /// request is sent.
    ///
    /// The resolutions MUST be sorted from finest (index 0) to coarsest.
    ///
    /// There MUST be at least one resolution.
    resolutions: Vec<zx::BootDuration>,
    /// The maximum count of "ticks" that the timer supports. The timer usually
    /// has a register that counts up or down based on a clock signal with
    /// the period specified by `resolutions`.  This is the maximum value that
    /// the counter can count to without overflowing.
    max_ticks: u64,
    /// The stable ID of the timer with the above configuration.
    id: u64,
}
916
917impl TimerConfig {
918    /// Creates a new timer config with supported timer resolutions and the max
919    /// ticks value for the timer's counter.
920    fn new_from_data(timer_id: u64, resolutions: &[zx::BootDuration], max_ticks: u64) -> Self {
921        debug!(
922            "TimerConfig: resolutions: {:?}, max_ticks: {}, timer_id: {}",
923            resolutions.iter().map(|r| format_duration(*r)).collect::<Vec<_>>(),
924            max_ticks,
925            timer_id
926        );
927        let resolutions = resolutions.iter().map(|d| *d).collect::<Vec<zx::BootDuration>>();
928        TimerConfig { resolutions, max_ticks, id: timer_id }
929    }
930
931    fn new_empty() -> Self {
932        error!("TimerConfig::new_empty() called, this is not OK.");
933        TimerConfig { resolutions: vec![], max_ticks: 0, id: 0 }
934    }
935
936    // Picks the most appropriate timer setting for it to fire as close as possible
937    // when `duration` expires.
938    //
939    // If duration is too far in the future for what the timer supports,
940    // return a smaller value, to allow the timer to be reprogrammed multiple
941    // times.
942    //
943    // If the available menu of resolutions is such that we can wake only after
944    // the intended deadline, begrudgingly return that option.
945    fn pick_setting(&self, duration: zx::BootDuration) -> TimerDuration {
946        assert!(self.resolutions.len() > 0, "there must be at least one supported resolution");
947
948        // Driver does not support zero ticks, so we must accept the finest resolution duration
949        // instead.
950        if duration <= zx::BootDuration::ZERO {
951            return TimerDuration::new(self.resolutions[0], 1);
952        }
953
954        //  0         |-------------->|<---------------|
955        //  |---------+---------------+----------------+---->
956        //  |---------^               |                |
957        //  | best positive slack     |                |
958        //  |-------------------------^ duration       |
959        //  |------------------------------------------^ best negative slack.
960        let mut best_positive_slack = TimerDuration::zero();
961        let mut best_negative_slack = TimerDuration::max();
962
963        if self.max_ticks == 0 {
964            return TimerDuration::new(zx::BootDuration::from_millis(1), 0);
965        }
966        let duration_slack: TimerDuration = duration.into();
967
968        for res1 in self.resolutions.iter() {
969            let smallest_unit = TimerDuration::new(*res1, 1);
970            let max_tick_at_res = TimerDuration::new(*res1, self.max_ticks);
971
972            let smallest_slack_larger_than_duration = smallest_unit > duration_slack;
973            let largest_slack_smaller_than_duration = max_tick_at_res < duration_slack;
974
975            if smallest_slack_larger_than_duration {
976                if duration_slack == TimerDuration::zero() {
977                    best_negative_slack = TimerDuration::zero();
978                } else if smallest_unit < best_negative_slack {
979                    best_negative_slack = smallest_unit;
980                }
981            }
982            if largest_slack_smaller_than_duration {
983                if max_tick_at_res > best_positive_slack
984                    || best_positive_slack == TimerDuration::zero()
985                {
986                    best_positive_slack = max_tick_at_res;
987                }
988            }
989
990            // "Regular" case.
991            if !smallest_slack_larger_than_duration && !largest_slack_smaller_than_duration {
992                // Check whether duration divides evenly into the available slack options
993                // for this resolution.  If it does, then that is the slack we're looking for.
994                let q = duration_slack / smallest_unit;
995                let d = smallest_unit * q;
996                if d == duration_slack {
997                    // Exact match, we can return right now.
998                    return d;
999                } else {
1000                    // Not an exact match, so q ticks is before, but q+1 is after.
1001                    if d > best_positive_slack {
1002                        best_positive_slack = TimerDuration::new_with_resolution(&smallest_unit, q);
1003                    }
1004                    let d_plus = TimerDuration::new_with_resolution(&smallest_unit, q + 1);
1005                    if d_plus < best_negative_slack {
1006                        best_negative_slack = d_plus;
1007                    }
1008                }
1009            }
1010        }
1011
1012        let p_slack = duration - best_positive_slack.duration();
1013        let n_slack = best_negative_slack.duration() - duration;
1014
1015        // If the closest approximation is 0ns, then we can not advance time, so we reject it.
1016        // Otherwise pick the smallest slack.  Note that when we pick the best positive slack,
1017        // we will wake *before* the actual deadline.  In multi-resolution counters, this enables
1018        // us to pick a finer count in the next go.
1019        let ret = if p_slack < n_slack && best_positive_slack.duration().into_nanos() > 0 {
1020            best_positive_slack
1021        } else {
1022            best_negative_slack
1023        };
1024        debug!("TimerConfig: picked slack: {} for duration: {}", ret, format_duration(duration));
1025        assert!(
1026            ret.duration().into_nanos() >= 0,
1027            "ret: {}, p_slack: {}, n_slack: {}, orig.duration: {}\n\tbest_p_slack: {}\n\tbest_n_slack: {}\n\ttarget: {}\n\t 1: {} 2: {:?}, 3: {:?}",
1028            ret,
1029            format_duration(p_slack),
1030            format_duration(n_slack),
1031            format_duration(duration),
1032            best_positive_slack,
1033            best_negative_slack,
1034            duration_slack,
1035            p_slack != zx::BootDuration::ZERO,
1036            p_slack,
1037            zx::BootDuration::ZERO,
1038        );
1039        ret
1040    }
1041}
1042
/// Fetches the [TimerConfig] from the hardware timer device behind `hrtimer`.
///
/// Thin logging wrapper around [TimerOps::get_timer_properties].
// NOTE(review): `&Box<dyn TimerOps>` could be `&dyn TimerOps` (callers coerce
// automatically) — confirm no other call sites rely on the boxed form.
async fn get_timer_properties(hrtimer: &Box<dyn TimerOps>) -> TimerConfig {
    debug!("get_timer_properties: requesting timer properties.");
    hrtimer.get_timer_properties().await
}
1047
/// The state of a single hardware timer that we must bookkeep.
struct TimerState {
    /// The task waiting for the proximate timer to expire.
    task: fasync::Task<()>,
    /// The deadline that the above task is waiting for, in the boot timeline.
    deadline: fasync::BootInstant,
}
1055
/// The command loop for timer interaction.  All changes to the wake alarm device programming
/// come in the form of commands through `cmds`.
///
/// Args:
/// - `scope`: the async scope in which sub-tasks are spawned.
/// - `snd`: the send end of `cmds` below, a clone is given to each spawned sub-task.
/// - `cmds`: the input queue of alarm-related commands.
/// - `timer_proxy`: the FIDL API proxy for interacting with the hardware device.
/// - `inspect`: the inspect node to record loop info into.
/// - `utc_transform`: the shared UTC clock transform consulted when converting deadlines.
1064async fn wake_timer_loop(
1065    scope: fasync::ScopeHandle,
1066    snd: mpsc::Sender<Cmd>,
1067    mut cmds: mpsc::Receiver<Cmd>,
1068    timer_proxy: Box<dyn TimerOps>,
1069    inspect: finspect::Node,
1070    utc_transform: Rc<RefCell<fxr::UtcClockTransform>>,
1071) {
1072    debug!("wake_timer_loop: started");
1073
1074    let mut timers = timers::Heap::new(utc_transform.clone());
1075    let timer_config = get_timer_properties(&timer_proxy).await;
1076
1077    // Keeps the currently executing HrTimer closure.  This is not read from, but
1078    // keeps the timer task active.
1079    #[allow(clippy::collection_is_never_read)]
1080    let mut hrtimer_status: Option<TimerState> = None;
1081
1082    // Initialize inspect properties. This must be done only once.
1083    //
1084    // Take note that these properties are updated when the `cmds` loop runs.
1085    // This means that repeated reads while no `cmds` activity occurs will return
1086    // old readings.  This is to ensure a consistent ability to replay the last
1087    // loop run if needed.
1088    let now_prop = inspect.create_int("now_ns", 0);
1089    let now_formatted_prop = inspect.create_string("now_formatted", "");
1090    let pending_timers_count_prop = inspect.create_uint("pending_timers_count", 0);
1091    let pending_timers_prop = inspect.create_string("pending_timers", "");
1092    let _deadline_histogram_prop = inspect.create_int_exponential_histogram(
1093        "requested_deadlines_ns",
1094        finspect::ExponentialHistogramParams {
1095            floor: 0,
1096            initial_step: zx::BootDuration::from_micros(1).into_nanos(),
1097            // Allows capturing deadlines up to dozens of days.
1098            step_multiplier: 10,
1099            buckets: 16,
1100        },
1101    );
1102    let slack_histogram_prop = inspect.create_int_exponential_histogram(
1103        "slack_ns",
1104        finspect::ExponentialHistogramParams {
1105            floor: 0,
1106            initial_step: zx::BootDuration::from_micros(1).into_nanos(),
1107            step_multiplier: 10,
1108            buckets: 16,
1109        },
1110    );
1111    let schedule_delay_prop = inspect.create_int_exponential_histogram(
1112        "schedule_delay_ns",
1113        finspect::ExponentialHistogramParams {
1114            floor: 0,
1115            initial_step: zx::BootDuration::from_micros(1).into_nanos(),
1116            step_multiplier: 10,
1117            buckets: 16,
1118        },
1119    );
1120    let boot_deadlines_count_prop = inspect.create_uint("boot_deadlines_count", 0);
1121    let utc_deadlines_count_prop = inspect.create_uint("utc_deadlines_count", 0);
1122    // Internals of what was programmed into the wake alarms hardware.
1123    let hw_node = inspect.create_child("hardware");
1124    let current_hw_deadline_prop = hw_node.create_string("current_deadline", "");
1125    let remaining_until_alarm_prop = hw_node.create_string("remaining_until_alarm", "");
1126
1127    // Debug nodes for b/454085350.
1128    let debug_node = inspect.create_child("debug_node");
1129    let start_notify_setup_count = debug_node.create_int("start_notify_setup", 0);
1130    let start_count = debug_node.create_int("start_count", 0);
1131    let responder_count = debug_node.create_int("responder_count", 0);
1132    let stop_count = debug_node.create_int("stop", 0);
1133    let stop_responder_count = debug_node.create_int("stop_responder", 0);
1134    let stop_hrtimer_count = debug_node.create_int("stop_hrtimer", 0);
1135    let schedule_hrtimer_count = debug_node.create_int("schedule_hrtimer", 0);
1136    let alarm_count = debug_node.create_int("alarm", 0);
1137    let alarm_fidl_count = debug_node.create_int("alarm_fidl", 0);
1138    let alarm_driver_count = debug_node.create_int("alarm_driver", 0);
1139    let utc_update_count = debug_node.create_int("utc_update", 0);
1140    let status_count = debug_node.create_int("status", 0);
1141    let loop_count = debug_node.create_int("loop_count", 0);
1142
1143    let hrtimer_node = debug_node.create_child("hrtimer");
1144
1145    const LRU_CACHE_CAPACITY: usize = 100;
1146    let mut error_cache = lru_cache::LruCache::new(LRU_CACHE_CAPACITY);
1147
1148    while let Some(cmd) = cmds.next().await {
1149        let _i = ScopedInc::new(&loop_count);
1150        trace::duration!("alarms", "Cmd");
1151        // Use a consistent notion of "now" across commands.
1152        let now = fasync::BootInstant::now();
1153        now_prop.set(now.into_nanos());
1154        trace::instant!("alarms", "wake_timer_loop", trace::Scope::Process, "now" => now.into_nanos());
1155        match cmd {
1156            Cmd::Start { conn_id, deadline, mode, alarm_id, responder } => {
1157                let _i = ScopedInc::new(&start_count);
1158                trace::duration!("alarms", "Cmd::Start");
1159                fuchsia_trace::flow_step!(
1160                    "alarms",
1161                    "hrtimer_lifecycle",
1162                    timers::get_trace_id(&alarm_id)
1163                );
1164                // NOTE: hold keep_alive until all work is done.
1165                debug!(
1166                    "wake_timer_loop: START alarm_id: \"{}\", conn_id: {:?}\n\tdeadline: {}\n\tnow:      {}",
1167                    alarm_id,
1168                    conn_id,
1169                    deadline,
1170                    format_timer(now.into()),
1171                );
1172
1173                defer! {
1174                    let _i = ScopedInc::new(&start_notify_setup_count);
1175                    // This is the only option that requires further action.
1176                    if let Some(mode) = mode {
1177                        if let fta::SetMode::NotifySetupDone(setup_done) = mode {
1178                            // Must signal once the setup is completed.
1179                            signal(&setup_done);
1180                            debug!("wake_timer_loop: START: setup_done signaled");
1181                        };
1182                    }
1183                }
1184                let deadline_boot = deadline.as_boot(&*utc_transform.borrow());
1185
1186                // TODO: b/444236931: re-enable.
1187                //// Bookkeeping, record the incidence of deadline types.
1188                //deadline_histogram_prop.insert((deadline_boot - now).into_nanos());
1189                match deadline {
1190                    timers::Deadline::Boot(_) => boot_deadlines_count_prop.add(1),
1191                    timers::Deadline::Utc(_) => utc_deadlines_count_prop.add(1),
1192                };
1193
1194                if timers::Heap::expired(now, deadline_boot) {
1195                    trace::duration!("alarms", "Cmd::Start:immediate");
1196                    fuchsia_trace::flow_step!(
1197                        "alarms",
1198                        "hrtimer_lifecycle",
1199                        timers::get_trace_id(&alarm_id)
1200                    );
1201                    // A timer set into now or the past expires right away.
1202                    let (_lease, keep_alive) = zx::EventPair::create();
1203                    debug!(
1204                        "[{}] wake_timer_loop: bogus lease {:?}",
1205                        line!(),
1206                        &keep_alive.koid().unwrap()
1207                    );
1208
1209                    {
1210                        let _i1 = ScopedInc::new(&responder_count);
1211                        if let Err(e) = responder
1212                            .send(&alarm_id, Ok(keep_alive))
1213                            .expect("responder is always present")
1214                        {
1215                            error!(
1216                                "wake_timer_loop: conn_id: {conn_id:?}, alarm: {alarm_id}: could not notify, dropping: {e}",
1217                            );
1218                        } else {
1219                            debug!(
1220                                "wake_timer_loop: conn_id: {conn_id:?}, alarm: {alarm_id}: EXPIRED IMMEDIATELY\n\tdeadline({}) <= now({})\n\tfull deadline: {}",
1221                                format_timer(deadline_boot.into()),
1222                                format_timer(now.into()),
1223                                deadline,
1224                            )
1225                        }
1226                    }
1227                } else {
1228                    trace::duration!("alarms", "Cmd::Start:regular");
1229                    fuchsia_trace::flow_step!(
1230                        "alarms",
1231                        "hrtimer_lifecycle",
1232                        timers::get_trace_id(&alarm_id)
1233                    );
1234                    // A timer scheduled for the future gets inserted into the timer heap.
1235                    let was_empty = timers.is_empty();
1236
1237                    let deadline_before = timers.peek_deadline_as_boot();
1238                    let node = match deadline {
1239                        timers::Deadline::Boot(_) => {
1240                            timers.new_node_boot(deadline_boot, alarm_id, conn_id, responder)
1241                        }
1242                        timers::Deadline::Utc(d) => {
1243                            timers.new_node_utc(d, alarm_id, conn_id, responder)
1244                        }
1245                    };
1246                    timers.push(node);
1247                    let deadline_after = timers.peek_deadline_as_boot();
1248
1249                    let deadline_changed = is_deadline_changed(deadline_before, deadline_after);
1250                    let needs_cancel = !was_empty && deadline_changed;
1251                    let needs_reschedule = was_empty || deadline_changed;
1252
1253                    if needs_reschedule {
1254                        // Always schedule the proximate deadline.
1255                        let schedulable_deadline = deadline_after.unwrap_or(deadline_boot);
1256                        if needs_cancel {
1257                            log_long_op!(stop_hrtimer(&timer_proxy, &timer_config));
1258                        }
1259                        hrtimer_status = Some(
1260                            schedule_hrtimer(
1261                                scope.clone(),
1262                                now,
1263                                &timer_proxy,
1264                                schedulable_deadline,
1265                                snd.clone(),
1266                                &timer_config,
1267                                &schedule_delay_prop,
1268                                &hrtimer_node,
1269                            )
1270                            .await,
1271                        );
1272                    }
1273                }
1274            }
1275            Cmd::StopById { timer_id, done } => {
1276                let _i = ScopedInc::new(&stop_count);
1277                defer! {
1278                    signal(&done);
1279                }
1280                trace::duration!("alarms", "Cmd::StopById", "alarm_id" => timer_id.alarm());
1281                fuchsia_trace::flow_step!(
1282                    "alarms",
1283                    "hrtimer_lifecycle",
1284                    timers::get_trace_id(&timer_id.alarm())
1285                );
1286                debug!("wake_timer_loop: STOP timer: {}", timer_id);
1287                let deadline_before = timers.peek_deadline_as_boot();
1288
1289                if let Some(timer_node) = timers.remove_by_id(&timer_id) {
1290                    let deadline_after = timers.peek_deadline_as_boot();
1291
1292                    {
1293                        let _i = ScopedInc::new(&stop_responder_count);
1294                        if let Some(res) = timer_node
1295                            .get_responder()
1296                            .send(timer_node.id().alarm(), Err(fta::WakeAlarmsError::Dropped))
1297                        {
1298                            // We must reply to the responder to keep the connection open.
1299                            res.expect("infallible");
1300                        }
1301                    }
1302                    if is_deadline_changed(deadline_before, deadline_after) {
1303                        let _i = ScopedInc::new(&stop_hrtimer_count);
1304                        log_long_op!(stop_hrtimer(&timer_proxy, &timer_config));
1305                    }
1306                    if let Some(deadline) = deadline_after {
1307                        let _i = ScopedInc::new(&schedule_hrtimer_count);
1308                        // Reschedule the hardware timer if the removed timer is the earliest one,
1309                        // and another one exists.
1310                        let new_timer_state = schedule_hrtimer(
1311                            scope.clone(),
1312                            now,
1313                            &timer_proxy,
1314                            deadline,
1315                            snd.clone(),
1316                            &timer_config,
1317                            &schedule_delay_prop,
1318                            &hrtimer_node,
1319                        )
1320                        .await;
1321                        let old_hrtimer_status = hrtimer_status.replace(new_timer_state);
1322                        if let Some(task) = old_hrtimer_status.map(|ev| ev.task) {
1323                            // Allow the task to complete. Since this task should have been
1324                            // canceled or completed already, this call should not block for
1325                            // a long time.
1326                            log_long_op!(task);
1327                        }
1328                    } else {
1329                        // No next timer, clean up the hrtimer status.
1330                        hrtimer_status = None;
1331                    }
1332                } else {
1333                    debug!("wake_timer_loop: STOP: no active timer to stop: {}", timer_id);
1334                }
1335            }
1336            Cmd::Alarm { expired_deadline, keep_alive } => {
1337                let _i = ScopedInc::new(&alarm_count);
1338
1339                trace::duration!("alarms", "Cmd::Alarm");
1340                // Expire all eligible timers, based on "now".  This is because
1341                // we may have woken up earlier than the actual deadline. This
1342                // happens for example if the timer can not make the actual
1343                // deadline and needs to be re-programmed.
1344                debug!(
1345                    "wake_timer_loop: ALARM!!! reached deadline: {}, wakey-wakey! {:?}",
1346                    format_timer(expired_deadline.into()),
1347                    keep_alive.koid().unwrap(),
1348                );
1349                let expired_count =
1350                    notify_all(&mut timers, &keep_alive, now, None, &slack_histogram_prop)
1351                        .expect("notification succeeds");
1352                if expired_count == 0 {
1353                    // This could be a resolution switch, or a straggler notification.
1354                    // Either way, the hardware timer is still ticking, cancel it.
1355                    debug!("wake_timer_loop: no expired alarms, reset hrtimer state");
1356                    log_long_op!(stop_hrtimer(&timer_proxy, &timer_config));
1357                }
1358                // There is a timer to reschedule, do that now.
1359                hrtimer_status = match timers.peek_deadline_as_boot() {
1360                    None => None,
1361                    Some(deadline) => Some(
1362                        schedule_hrtimer(
1363                            scope.clone(),
1364                            now,
1365                            &timer_proxy,
1366                            deadline,
1367                            snd.clone(),
1368                            &timer_config,
1369                            &schedule_delay_prop,
1370                            &hrtimer_node,
1371                        )
1372                        .await,
1373                    ),
1374                }
1375            }
1376            Cmd::AlarmFidlError { expired_deadline, error } => {
1377                let _i = ScopedInc::new(&alarm_fidl_count);
1378
1379                trace::duration!("alarms", "Cmd::AlarmFidlError");
1380                // We do not have a wake lease, so the system may sleep before
1381                // we get to schedule a new timer. We have no way to avoid it
1382                // today.
1383                let error_string = format!("{}", error);
1384                if !error_cache.contains_key(&error_string) {
1385                    warn!(
1386                        "wake_timer_loop: FIDL error: {}, deadline: {}, now: {}",
1387                        error,
1388                        format_timer(expired_deadline.into()),
1389                        format_timer(now.into()),
1390                    );
1391                    error_cache.insert(error_string, ());
1392                }
1393                // Manufacture a fake lease to make the code below work.
1394                // Maybe use Option instead?
1395                let (_dummy_lease, peer) = zx::EventPair::create();
1396                debug!(
1397                    "bogus lease: {:?} fidl error [{}:{}]",
1398                    &peer.koid().unwrap(),
1399                    file!(),
1400                    line!()
1401                );
1402                notify_all(
1403                    &mut timers,
1404                    &peer,
1405                    now,
1406                    Some(TimerOpsError::Fidl(error)),
1407                    &slack_histogram_prop,
1408                )
1409                .expect("notification succeeds");
1410                hrtimer_status = match timers.peek_deadline_as_boot() {
1411                    None => None, // No remaining timers, nothing to schedule.
1412                    Some(deadline) => Some(
1413                        schedule_hrtimer(
1414                            scope.clone(),
1415                            now,
1416                            &timer_proxy,
1417                            deadline,
1418                            snd.clone(),
1419                            &timer_config,
1420                            &schedule_delay_prop,
1421                            &hrtimer_node,
1422                        )
1423                        .await,
1424                    ),
1425                }
1426            }
1427            Cmd::AlarmDriverError {
1428                expired_deadline,
1429                error,
1430                timer_config_id,
1431                resolution_nanos,
1432                ticks,
1433            } => {
1434                let _i = ScopedInc::new(&alarm_driver_count);
1435
1436                trace::duration!("alarms", "Cmd::AlarmDriverError");
1437                let (_dummy_lease, peer) = zx::EventPair::create();
1438                debug!(
1439                    "bogus lease: {:?} driver error. [{}:{}]",
1440                    &peer.koid().unwrap(),
1441                    file!(),
1442                    line!()
1443                );
1444                notify_all(
1445                    &mut timers,
1446                    &peer,
1447                    now,
1448                    Some(TimerOpsError::Driver(error)),
1449                    &slack_histogram_prop,
1450                )
1451                .expect("notification succeeds");
1452                match error {
1453                    fidl_fuchsia_hardware_hrtimer::DriverError::Canceled => {
1454                        // Nothing to do here, cancelation is handled in Stop code.
1455                        debug!(
1456                            "wake_timer_loop: CANCELED timer at deadline: {}",
1457                            format_timer(expired_deadline.into())
1458                        );
1459                    }
1460                    _ => {
1461                        error!(
1462                            "wake_timer_loop: DRIVER SAYS: {:?}, deadline: {}, now: {}\n\ttimer_id={}\n\tresolution={}\n\tticks={}",
1463                            error,
1464                            format_timer(expired_deadline.into()),
1465                            format_timer(now.into()),
1466                            timer_config_id,
1467                            resolution_nanos,
1468                            ticks,
1469                        );
1470                        // We do not have a wake lease, so the system may sleep before
1471                        // we get to schedule a new timer. We have no way to avoid it
1472                        // today.
1473                        hrtimer_status = match timers.peek_deadline_as_boot() {
1474                            None => None,
1475                            Some(deadline) => Some(
1476                                schedule_hrtimer(
1477                                    scope.clone(),
1478                                    now,
1479                                    &timer_proxy,
1480                                    deadline,
1481                                    snd.clone(),
1482                                    &timer_config,
1483                                    &schedule_delay_prop,
1484                                    &hrtimer_node,
1485                                )
1486                                .await,
1487                            ),
1488                        }
1489                    }
1490                }
1491            }
1492            Cmd::UtcUpdated { transform } => {
1493                let _i = ScopedInc::new(&utc_update_count);
1494
1495                trace::duration!("alarms", "Cmd::UtcUpdated");
1496                debug!("wake_timer_loop: applying new clock transform: {transform:?}");
1497
1498                // Assigning to this shared reference updates the deadlines of all
1499                // UTC timers.
1500                *utc_transform.borrow_mut() = transform;
1501
1502                // Reschedule the hardware timer with the now-current deadline if there is an
1503                // active timer.
1504                if hrtimer_status.is_some() {
1505                    log_long_op!(stop_hrtimer(&timer_proxy, &timer_config));
1506                    // Should we request a wake lock here?
1507                    hrtimer_status = match timers.peek_deadline_as_boot() {
1508                        None => None,
1509                        Some(deadline) => Some(
1510                            schedule_hrtimer(
1511                                scope.clone(),
1512                                now,
1513                                &timer_proxy,
1514                                deadline,
1515                                snd.clone(),
1516                                &timer_config,
1517                                &schedule_delay_prop,
1518                                &hrtimer_node,
1519                            )
1520                            .await,
1521                        ),
1522                    }
1523                }
1524            }
1525        }
1526
1527        {
1528            let _i = ScopedInc::new(&status_count);
1529
1530            // Print and record diagnostics after each iteration, record the
1531            // duration for performance awareness.  Note that iterations happen
1532            // only occasionally, so these stats can remain unchanged for a long
1533            // time.
1534            trace::duration!("timekeeper", "inspect");
1535            let now_formatted = format_timer(now.into());
1536            debug!("wake_timer_loop: now:                             {}", &now_formatted);
1537            now_formatted_prop.set(&now_formatted);
1538
1539            let pending_timers_count: u64 =
1540                timers.timer_count().try_into().expect("always convertible");
1541            debug!("wake_timer_loop: currently pending timer count:   {}", pending_timers_count);
1542            pending_timers_count_prop.set(pending_timers_count);
1543
1544            let pending_timers = format!("{}", timers);
1545            debug!("wake_timer_loop: currently pending timers:        \n\t{}", &timers);
1546            pending_timers_prop.set(&pending_timers);
1547
1548            let current_deadline: String = hrtimer_status
1549                .as_ref()
1550                .map(|s| format!("{}", format_timer(s.deadline.into())))
1551                .unwrap_or_else(|| "(none)".into());
1552            debug!("wake_timer_loop: current hardware timer deadline: {:?}", current_deadline);
1553            current_hw_deadline_prop.set(&current_deadline);
1554
1555            let remaining_duration_until_alarm = hrtimer_status
1556                .as_ref()
1557                .map(|s| format!("{}", format_duration((s.deadline - now).into())))
1558                .unwrap_or_else(|| "(none)".into());
1559            debug!(
1560                "wake_timer_loop: remaining duration until alarm:  {}",
1561                &remaining_duration_until_alarm
1562            );
1563            remaining_until_alarm_prop.set(&remaining_duration_until_alarm);
1564            debug!("---");
1565        }
1566    }
1567
1568    // Prod code should not see this loop ever exiting. the wake alarm manager
1569    // should run forever.
1570    log::info!("wake_timer_loop: exiting. This is only correct in test code.");
1571}
1572
/// Schedules a wake alarm.
///
/// # Args:
///
/// - `scope`: used to spawn async tasks.
/// - `now`: the time instant used as the value of current instant.
/// - `hrtimer`: the proxy for the hrtimer device driver.
/// - `deadline`: the time instant in the future at which the alarm should fire.
/// - `command_send`: the sender channel to use when the timer expires.
/// - `timer_config`: a configuration of the hardware timer showing supported resolutions and
///   max tick value.
/// - `schedule_delay_histogram`: inspect instrumentation.
/// - `debug_node`: used for keeping debug counters.
///
/// # Returns
///
/// The [TimerState] for the newly programmed hardware timer. The returned
/// state owns the spawned task that waits for the driver's response; dropping
/// it cancels that wait.
///
/// This function does not return until the driver has confirmed (by signaling
/// an event) that the alarm has actually been scheduled, so that the caller
/// can safely release any wake lease it may be holding.
async fn schedule_hrtimer(
    scope: fasync::ScopeHandle,
    now: fasync::BootInstant,
    hrtimer: &Box<dyn TimerOps>,
    deadline: fasync::BootInstant,
    mut command_send: mpsc::Sender<Cmd>,
    timer_config: &TimerConfig,
    _schedule_delay_histogram: &finspect::IntExponentialHistogramProperty,
    debug_node: &finspect::Node,
) -> TimerState {
    // Clamp to zero so an already-expired deadline is programmed as an
    // immediate timeout rather than a negative one.
    let timeout = std::cmp::max(zx::BootDuration::ZERO, deadline - now);
    trace::duration!("alarms", "schedule_hrtimer", "timeout" => timeout.into_nanos());
    // When signaled, the hrtimer has been scheduled.
    let hrtimer_scheduled = zx::Event::create();

    // Debug counters exposed via inspect; ScopedInc below increments them for
    // the duration of the corresponding scope.
    let schedule_count = debug_node.create_int("schedule", 0);
    let hrtimer_wait_count = debug_node.create_int("hrtimer_wait", 0);
    let wait_signaled_count = debug_node.create_int("wait_signaled", 0);

    let _sc = ScopedInc::new(&schedule_count);

    debug!(
        "schedule_hrtimer:\n\tnow: {}\n\tdeadline: {}\n\ttimeout: {}",
        format_timer(now.into()),
        format_timer(deadline.into()),
        format_duration(timeout),
    );

    // Translate the requested timeout into the best (resolution, ticks) pair
    // the hardware supports.
    let slack = timer_config.pick_setting(timeout);
    let resolution_nanos = slack.resolution.into_nanos();
    // The driver will reject "0" ticks, even though it probably shouldn't. See for details:
    // b/437177931.
    let useful_ticks = std::cmp::max(MIN_USEFUL_TICKS, slack.ticks());

    trace::instant!("alarms", "hrtimer:programmed",
        trace::Scope::Process,
        "resolution_ns" => resolution_nanos,
        "ticks" => useful_ticks
    );
    let timer_config_id = timer_config.id;
    // The future is created here (which issues the driver call), but is only
    // awaited inside the spawned task below.
    let start_and_wait_fut = {
        let _sc = ScopedInc::new(&hrtimer_wait_count);
        hrtimer.start_and_wait(
            timer_config.id,
            &ffhh::Resolution::Duration(resolution_nanos),
            useful_ticks,
            clone_handle(&hrtimer_scheduled),
        )
    };

    // A duplicate handle used by the task below to unblock this function even
    // when the driver reports an error instead of signaling the event itself.
    let hrtimer_scheduled_if_error = clone_handle(&hrtimer_scheduled);
    let hrtimer_task = scope.spawn_local(async move {
        debug!("hrtimer_task: waiting for hrtimer driver response");
        trace::instant!("alarms", "hrtimer:started", trace::Scope::Process);
        let response = start_and_wait_fut.await;
        trace::instant!("alarms", "hrtimer:response", trace::Scope::Process);
        match response {
            Err(TimerOpsError::Fidl(e)) => {
                defer! {
                    // Allow hrtimer_scheduled to proceed anyways.
                    signal(&hrtimer_scheduled_if_error);
                }
                trace::instant!("alarms", "hrtimer:response:fidl_error", trace::Scope::Process);
                command_send
                    .start_send(Cmd::AlarmFidlError { expired_deadline: now, error: e })
                    .unwrap();
                // BAD: no way to keep alive.
            }
            Err(TimerOpsError::Driver(e)) => {
                defer! {
                    // This should be idempotent if the error occurs after
                    // the timer was scheduled.
                    signal(&hrtimer_scheduled_if_error);
                }
                let driver_error_str = format!("{:?}", e);
                trace::instant!("alarms", "hrtimer:response:driver_error", trace::Scope::Process, "error" => &driver_error_str[..]);
                // This is very common. For example, a "timer canceled" event
                // will result in this code path being hit.
                debug!("schedule_hrtimer: hrtimer driver error: {:?}", e);
                command_send
                    .start_send(Cmd::AlarmDriverError {
                        expired_deadline: now,
                        error: e,
                        timer_config_id,
                        resolution_nanos,
                        ticks: useful_ticks,
                    })
                    .unwrap();
                // BAD: no way to keep alive.
            }
            Ok(keep_alive) => {
                trace::instant!("alarms", "hrtimer:response:alarm", trace::Scope::Process);
                debug!("hrtimer: got alarm response: {:?}", keep_alive);
                // May trigger sooner than the deadline.
                command_send
                    .start_send(Cmd::Alarm { expired_deadline: deadline, keep_alive })
                    .unwrap();
            }
        }
        debug!("hrtimer_task: exiting task.");
        trace::instant!("alarms", "hrtimer:task_exit", trace::Scope::Process);
    }).into();
    debug!("schedule_hrtimer: waiting for event to be signaled");

    {
        let _i = ScopedInc::new(&wait_signaled_count);
        // We must wait here to ensure that the wake alarm has been scheduled.
        log_long_op!(wait_signaled(&hrtimer_scheduled));
    }

    // Warn when the driver round-trip took unusually long; this delay eats
    // into the accuracy of the scheduled alarm.
    let now_after_signaled = fasync::BootInstant::now();
    let duration_until_scheduled: zx::BootDuration = (now_after_signaled - now).into();
    if duration_until_scheduled > zx::BootDuration::from_nanos(LONG_DELAY_NANOS) {
        trace::duration!("alarms", "schedule_hrtimer:unusual_duration",
            "duration" => duration_until_scheduled.into_nanos());
        warn!(
            "unusual duration until hrtimer scheduled: {}",
            format_duration(duration_until_scheduled)
        );
    }
    // TODO: b/444236931: re-enable.
    //schedule_delay_histogram.insert(duration_until_scheduled.into_nanos());
    debug!("schedule_hrtimer: hrtimer wake alarm has been scheduled.");
    TimerState { task: hrtimer_task, deadline }
}
1711
1712/// Notify all `timers` that `reference_instant` has been reached.
1713///
1714/// The notified `timers` are removed from the list of timers to notify.
1715///
1716/// Args:
1717/// - `timers`: the collection of currently available timers.
1718/// - `lease_prototype`: an EventPair used as a wake lease.
1719/// - `reference_instant`: the time instant used as a reference for alarm notification.
1720/// - `timer_ops_error`: if set, this is the error that happened while attempting to
1721///   schedule or trigger a timer in hardware.
1722fn notify_all(
1723    timers: &mut timers::Heap,
1724    lease_prototype: &zx::EventPair,
1725    reference_instant: fasync::BootInstant,
1726    timer_ops_error: Option<TimerOpsError>,
1727    _unusual_slack_histogram: &finspect::IntExponentialHistogramProperty,
1728) -> Result<usize> {
1729    trace::duration!("alarms", "notify_all");
1730    let now = fasync::BootInstant::now();
1731    let mut expired = 0;
1732    while let Some(timer_node) = timers.maybe_expire_earliest(reference_instant) {
1733        expired += 1;
1734        // How much later than requested did the notification happen.
1735        let deadline = timer_node.get_boot_deadline();
1736        let alarm = timer_node.id().alarm();
1737        let alarm_id = alarm.to_string();
1738        trace::duration!("alarms", "notify_all:notified", "alarm_id" => &*alarm_id);
1739        fuchsia_trace::flow_step!("alarms", "hrtimer_lifecycle", timers::get_trace_id(&alarm_id));
1740        let conn_id = timer_node.id().conn.clone();
1741        let slack: zx::BootDuration = deadline - now;
1742        if slack < zx::BootDuration::from_nanos(-LONG_DELAY_NANOS) {
1743            trace::duration!("alarms", "schedule_hrtimer:unusual_slack", "slack" => slack.into_nanos());
1744            // This alarm triggered noticeably later than it should have.
1745            warn!(
1746                "alarm id: {} had an unusually large slack: {}",
1747                alarm_id,
1748                format_duration(slack)
1749            );
1750        }
1751        if slack < zx::BootDuration::ZERO {
1752            // TODO: b/444236931: re-enable.
1753            //unusual_slack_histogram.insert(-slack.into_nanos());
1754        }
1755        if let Some(ref err) = timer_ops_error {
1756            // Canceled timers are getting notified with alarm, but not other
1757            // errors.
1758            if !err.is_canceled() {
1759                timer_node.get_responder().send(alarm, Err(err.clone().into()));
1760                continue;
1761            }
1762        }
1763        debug!(
1764            concat!(
1765                "wake_alarm_loop: ALARM alarm_id: \"{}\"\n\tdeadline: {},\n\tconn_id: {:?},\n\t",
1766                "reference_instant: {},\n\tnow: {},\n\tslack: {}",
1767            ),
1768            alarm_id,
1769            format_timer(deadline.into()),
1770            conn_id,
1771            format_timer(reference_instant.into()),
1772            format_timer(now.into()),
1773            format_duration(slack),
1774        );
1775        let lease = clone_handle(lease_prototype);
1776        trace::instant!("alarms", "notify", trace::Scope::Process, "alarm_id" => &alarm_id[..], "conn_id" => conn_id);
1777        if let Some(Err(e)) = timer_node.get_responder().send(alarm, Ok(lease)) {
1778            error!("could not signal responder: {:?}", e);
1779        }
1780        trace::instant!("alarms", "notified", trace::Scope::Process);
1781    }
1782    trace::instant!("alarms", "notify", trace::Scope::Process, "expired_count" => expired);
1783    debug!("notify_all: expired count: {}", expired);
1784    Ok(expired)
1785    // A new timer is not scheduled yet here.
1786}
1787
/// The hrtimer driver service directory.  hrtimer driver APIs appear as randomly
/// named files in this directory. They are expected to come and go.
///
/// Used by [connect_to_hrtimer_async] to discover an available hrtimer device.
const HRTIMER_DIRECTORY: &str = "/dev/class/hrtimer";
1791
1792/// Connects to the high resolution timer device driver.
1793///
1794/// This function watches the hrtimer device directory and connects to the first
1795/// available hrtimer device.
1796///
1797/// # Returns
1798/// A `Result` containing a `ffhh::DeviceProxy` on success, or an error if
1799/// the connection fails.
1800pub async fn connect_to_hrtimer_async() -> Result<ffhh::DeviceProxy> {
1801    debug!("connect_to_hrtimer: trying directory: {}", HRTIMER_DIRECTORY);
1802    let dir =
1803        fuchsia_fs::directory::open_in_namespace(HRTIMER_DIRECTORY, fidl_fuchsia_io::PERM_READABLE)
1804            .with_context(|| format!("Opening {}", HRTIMER_DIRECTORY))?;
1805    let path = device_watcher::watch_for_files(&dir)
1806        .await
1807        .with_context(|| format!("Watching for files in {}", HRTIMER_DIRECTORY))?
1808        .try_next()
1809        .await
1810        .with_context(|| format!("Getting a file from {}", HRTIMER_DIRECTORY))?;
1811    let path = path.ok_or_else(|| anyhow!("Could not find {}", HRTIMER_DIRECTORY))?;
1812    let path = path
1813        .to_str()
1814        .ok_or_else(|| anyhow!("Could not find a valid str for {}", HRTIMER_DIRECTORY))?;
1815    connect_to_named_protocol_at_dir_root::<ffhh::DeviceMarker>(&dir, path)
1816        .context("Failed to connect built-in service")
1817}
1818
1819#[cfg(test)]
1820mod tests {
1821    use super::*;
1822    use assert_matches::assert_matches;
1823    use diagnostics_assertions::{AnyProperty, assert_data_tree};
1824    use fuchsia_async::TestExecutor;
1825    use futures::select;
1826    use std::pin::pin;
1827    use test_case::test_case;
1828    use test_util::{assert_gt, assert_lt};
1829
1830    fn fake_wake_lease() -> fidl_fuchsia_power_system::LeaseToken {
1831        let (_lease, peer) = zx::EventPair::create();
1832        peer
1833    }
1834
1835    #[test]
1836    fn timer_duration_no_overflow() {
1837        let duration1 = TimerDuration {
1838            resolution: zx::BootDuration::from_seconds(100_000_000),
1839            ticks: u64::MAX,
1840        };
1841        let duration2 = TimerDuration {
1842            resolution: zx::BootDuration::from_seconds(110_000_000),
1843            ticks: u64::MAX,
1844        };
1845        assert_eq!(duration1, duration1);
1846        assert_eq!(duration2, duration2);
1847
1848        assert_lt!(duration1, duration2);
1849        assert_gt!(duration2, duration1);
1850    }
1851
    // Pairs of TimerDurations that must compare equal: equality is defined on
    // the total duration (resolution * ticks), not on the representation.
    #[test_case(
        TimerDuration::new(zx::BootDuration::from_nanos(1), 1),
        TimerDuration::new(zx::BootDuration::from_nanos(1), 1)
    )]
    #[test_case(
        TimerDuration::new(zx::BootDuration::from_nanos(1), 10),
        TimerDuration::new(zx::BootDuration::from_nanos(10), 1)
    )]
    #[test_case(
        TimerDuration::new(zx::BootDuration::from_nanos(10), 1),
        TimerDuration::new(zx::BootDuration::from_nanos(1), 10)
    )]
    #[test_case(
        TimerDuration::new(zx::BootDuration::from_micros(1), 1),
        TimerDuration::new(zx::BootDuration::from_nanos(1), 1000)
    )]
    fn test_slack_eq(one: TimerDuration, other: TimerDuration) {
        assert_eq!(one, other);
    }
1871
    // Pairs where the first TimerDuration's total duration is strictly
    // shorter than the second's.
    #[test_case(
        TimerDuration::new(zx::BootDuration::from_nanos(1), 1),
        TimerDuration::new(zx::BootDuration::from_nanos(1), 2)
    )]
    #[test_case(
        TimerDuration::new(zx::BootDuration::from_nanos(1), 1),
        TimerDuration::new(zx::BootDuration::from_nanos(10), 1)
    )]
    fn test_slack_lt(one: TimerDuration, other: TimerDuration) {
        assert_lt!(one, other);
    }
1883
    // Pairs where the first TimerDuration's total duration is strictly
    // longer than the second's.
    #[test_case(
        TimerDuration::new(zx::BootDuration::from_nanos(1), 2),
        TimerDuration::new(zx::BootDuration::from_nanos(1), 1)
    )]
    #[test_case(
        TimerDuration::new(zx::BootDuration::from_nanos(10), 1),
        TimerDuration::new(zx::BootDuration::from_nanos(1), 1)
    )]
    fn test_slack_gt(one: TimerDuration, other: TimerDuration) {
        assert_gt!(one, other);
    }
1895
    // Each case supplies: the resolutions the fake hardware supports, its
    // max tick count, the requested sleep duration, and the (resolution,
    // ticks) setting that pick_setting is expected to choose.
    #[test_case(
        vec![zx::BootDuration::from_nanos(1)],
        100,
        zx::BootDuration::from_nanos(0),
        TimerDuration::new(zx::BootDuration::from_nanos(1), 1) ; "0ns becomes 1ns"
    )]
    #[test_case(
        vec![zx::BootDuration::from_nanos(1)],
        100,
        zx::BootDuration::from_nanos(50),
        TimerDuration::new(zx::BootDuration::from_nanos(1), 50) ; "Exact at 50x1ns"
    )]
    #[test_case(
        vec![zx::BootDuration::from_nanos(2)],
        100,
        zx::BootDuration::from_nanos(50),
        TimerDuration::new(zx::BootDuration::from_nanos(2), 25) ; "Exact at 25x2ns"
    )]
    #[test_case(
        vec![zx::BootDuration::from_nanos(3)],
        100,
        zx::BootDuration::from_nanos(50),
        // The closest duration is 51ns.
        TimerDuration::new(zx::BootDuration::from_nanos(3), 17) ; "Inexact at 51ns"
    )]
    #[test_case(
        vec![
            zx::BootDuration::from_nanos(3),
            zx::BootDuration::from_nanos(4)
        ],
        100,
        zx::BootDuration::from_nanos(50),
        TimerDuration::new(zx::BootDuration::from_nanos(3), 17) ; "3ns is a better resolution"
    )]
    #[test_case(
        vec![
            zx::BootDuration::from_nanos(1000),
        ],
        100,
        zx::BootDuration::from_nanos(50),
        TimerDuration::new(zx::BootDuration::from_nanos(1000), 1) ;
        "950ns negative slack is the best we can do"
    )]
    #[test_case(
        vec![
            zx::BootDuration::from_nanos(1),
        ],
        10,
        zx::BootDuration::from_nanos(50),
        TimerDuration::new(zx::BootDuration::from_nanos(1), 10) ;
        "10ns positive slack is the best we can do"
    )]
    #[test_case(
        vec![
            zx::BootDuration::from_millis(1),
            zx::BootDuration::from_micros(100),
            zx::BootDuration::from_micros(10),
            zx::BootDuration::from_micros(1),
        ],
        20,  // Make only one of the resolutions above match.
        zx::BootDuration::from_micros(150),
        TimerDuration::new(zx::BootDuration::from_micros(10), 15) ;
        "Realistic case with resolutions from driver, should be 15us"
    )]
    #[test_case(
        vec![
            zx::BootDuration::from_millis(1),
            zx::BootDuration::from_micros(100),
            zx::BootDuration::from_micros(10),
            zx::BootDuration::from_micros(1),
        ],
        2000,  // Make only one of the resolutions above match.
        zx::BootDuration::from_micros(6000),
        TimerDuration::new(zx::BootDuration::from_millis(1), 6) ;
        "Coarser exact unit wins"
    )]
    #[test_case(
        vec![
            zx::BootDuration::from_millis(1),
            zx::BootDuration::from_millis(10),
            zx::BootDuration::from_millis(100),
        ],
        1000,
        zx::BootDuration::from_micros(-10),
        TimerDuration::new(zx::BootDuration::from_millis(1), 1) ;
        "Negative duration gets the smallest timer duration"
    )]
    #[test_case(
        vec![
            zx::BootDuration::from_millis(1),
            zx::BootDuration::from_millis(10),
            zx::BootDuration::from_millis(100),
        ],
        1000,
        zx::BootDuration::ZERO,
        TimerDuration::new(zx::BootDuration::from_millis(1), 1) ;
        "Zero duration gets the smallest timer duration"
    )]
    fn test_pick_setting(
        resolutions: Vec<zx::BootDuration>,
        max_ticks: u64,
        duration: zx::BootDuration,
        expected: TimerDuration,
    ) {
        let config = TimerConfig::new_from_data(MAIN_TIMER_ID as u64, &resolutions[..], max_ticks);
        let actual = config.pick_setting(duration);

        // .eq() does not work here, since we do not just require that the values
        // be equal, but also that the same resolution is used in both.
        assert_slack_eq(expected, actual);
    }
2007
2008    // TimerDuration assertion with human-friendly output in case of an error.
2009    fn assert_slack_eq(expected: TimerDuration, actual: TimerDuration) {
2010        let slack = expected.duration() - actual.duration();
2011        assert_eq!(
2012            actual.resolution(),
2013            expected.resolution(),
2014            "\n\texpected: {} ({})\n\tactual  : {} ({})\n\tslack: expected-actual={}",
2015            expected,
2016            format_duration(expected.duration()),
2017            actual,
2018            format_duration(actual.duration()),
2019            format_duration(slack)
2020        );
2021        assert_eq!(
2022            actual.ticks(),
2023            expected.ticks(),
2024            "\n\texpected: {} ({})\n\tactual  : {} ({})\n\tslack: expected-actual={}",
2025            expected,
2026            format_duration(expected.duration()),
2027            actual,
2028            format_duration(actual.duration()),
2029            format_duration(slack)
2030        );
2031    }
2032
    /// Commands accepted by the fake hrtimer device used in these tests.
    #[derive(Debug)]
    enum FakeCmd {
        /// Configures the fake driver: the resolutions and max tick count it
        /// reports, the wake lease handle it hands out, and an event that is
        /// signaled once the properties have been applied.
        SetProperties {
            resolutions: Vec<zx::BootDuration>,
            max_ticks: i64,
            keep_alive: zx::EventPair,
            done: zx::Event,
        },
    }
2042
2043    use std::cell::RefCell;
2044    use std::rc::Rc;
2045
2046    // A fake that emulates some aspects of the hrtimer driver.
2047    //
2048    // Specifically it can be configured with different resolutions, and will
2049    // bomb out if any waiting methods are called twice in a succession, without
2050    // canceling the timer in between.
2051    fn fake_hrtimer_connection(
2052        scope: fasync::ScopeHandle,
2053        rcv: mpsc::Receiver<FakeCmd>,
2054    ) -> ffhh::DeviceProxy {
2055        debug!("fake_hrtimer_connection: entry.");
2056        let (hrtimer, mut stream) =
2057            fidl::endpoints::create_proxy_and_stream::<ffhh::DeviceMarker>();
2058        scope.clone().spawn_local(async move {
2059            let mut rcv = rcv.fuse();
2060            let timer_properties = Rc::new(RefCell::new(None));
2061            let wake_lease = Rc::new(RefCell::new(None));
2062
2063            // Set to true when the hardware timer is supposed to be running.
2064            // Hardware timer may not be reprogrammed without canceling it first,
2065            // make sure the tests fail the same way as production would.
2066            let timer_running = Rc::new(RefCell::new(false));
2067
2068            loop {
2069                let timer_properties = timer_properties.clone();
2070                let wake_lease = wake_lease.clone();
2071                select! {
2072                    cmd = rcv.next() => {
2073                        debug!("fake_hrtimer_connection: cmd: {:?}", cmd);
2074                        match cmd {
2075                            Some(FakeCmd::SetProperties{ resolutions, max_ticks, keep_alive, done}) => {
2076                                let mut timer_props = vec![];
2077                                for v in 0..10 {
2078                                    timer_props.push(ffhh::TimerProperties {
2079                                        supported_resolutions: Some(
2080                                            resolutions.iter()
2081                                                .map(|d| ffhh::Resolution::Duration(d.into_nanos())).collect()),
2082                                        max_ticks: Some(max_ticks.try_into().unwrap()),
2083                                        // start_and_wait method works.
2084                                        supports_wait: Some(true),
2085                                        id: Some(v),
2086                                        ..Default::default()
2087                                        },
2088                                    );
2089                                }
2090                                *timer_properties.borrow_mut() = Some(timer_props);
2091                                *wake_lease.borrow_mut() = Some(keep_alive);
2092                                debug!("set timer properties to: {:?}", timer_properties);
2093                                signal(&done);
2094                            }
2095                            e => {
2096                                panic!("unrecognized command: {:?}", e);
2097                            }
2098                        }
2099                        // Set some responses if we have them.
2100                    },
2101                    event = stream.next() => {
2102                        debug!("fake_hrtimer_connection: event: {:?}", event);
2103                        if let Some(Ok(event)) = event {
2104                            match event {
2105                                ffhh::DeviceRequest::Start { responder, .. } => {
2106                                    assert!(!*timer_running.borrow(), "invariant broken: timer may not be running here");
2107                                    *timer_running.borrow_mut() = true;
2108                                    responder.send(Ok(())).expect("");
2109                                }
2110                                ffhh::DeviceRequest::Stop { responder, .. } => {
2111                                    *timer_running.borrow_mut() = false;
2112                                    responder.send(Ok(())).expect("");
2113                                }
2114                                ffhh::DeviceRequest::GetTicksLeft { responder, .. } => {
2115                                    responder.send(Ok(1)).expect("");
2116                                }
2117                                ffhh::DeviceRequest::SetEvent { responder, .. } => {
2118                                    responder.send(Ok(())).expect("");
2119                                }
2120                                ffhh::DeviceRequest::StartAndWait { id, resolution, ticks, setup_event, responder, .. } => {
2121                                    assert!(!*timer_running.borrow(), "invariant broken: timer may not be running here");
2122                                    *timer_running.borrow_mut() = true;
2123                                    debug!("fake_hrtimer_connection: starting timer: \"{}\", resolution: {:?}, ticks: {}", id, resolution, ticks);
2124                                    let ticks: i64 = ticks.try_into().unwrap();
2125                                    let sleep_duration  = zx::BootDuration::from_nanos(ticks * match resolution {
2126                                        ffhh::Resolution::Duration(e) => e,
2127                                        _ => {
2128                                            error!("resolution has an unexpected value");
2129                                            1
2130                                        }
2131                                    });
2132                                    let timer_running_clone = timer_running.clone();
2133                                    scope.spawn_local(async move {
2134                                        // Signaling the setup event allows the client to proceed
2135                                        // with post-scheduling work.
2136                                        signal(&setup_event);
2137
2138                                        // Respond after the requested sleep time. In tests this will
2139                                        // be sleeping in fake time.
2140                                        fasync::Timer::new(sleep_duration).await;
2141                                        *timer_running_clone.borrow_mut() = false;
2142                                        responder.send(Ok(clone_handle(wake_lease.borrow().as_ref().unwrap()))).unwrap();
2143                                        debug!("StartAndWait: hrtimer expired");
2144                                    });
2145                                }
2146                                ffhh::DeviceRequest::StartAndWait2 { responder, .. } => {
2147                                    assert!(!*timer_running.borrow(), "invariant broken: timer may not be running here");
2148                                    *timer_running.borrow_mut() = true;
2149                                    responder.send(Err(ffhh::DriverError::InternalError)).expect("");
2150                                }
2151                                ffhh::DeviceRequest::GetProperties { responder, .. } => {
2152                                    if (*timer_properties).borrow().is_none() {
2153                                        error!("timer_properties is empty, this is not what you want!");
2154                                    }
2155                                    responder
2156                                        .send(ffhh::Properties {
2157                                            timers_properties: (*timer_properties).borrow().clone(),
2158                                            ..Default::default()
2159                                        })
2160                                        .expect("");
2161                                }
2162                                ffhh::DeviceRequest::ReadTimer { responder, .. } => {
2163                                    responder.send(Err(ffhh::DriverError::NotSupported)).expect("");
2164                                }
2165                                ffhh::DeviceRequest::ReadClock { responder, .. } => {
2166                                    responder.send(Err(ffhh::DriverError::NotSupported)).expect("");
2167                                }
2168                                ffhh::DeviceRequest::_UnknownMethod { .. } => todo!(),
2169                            }
2170                        }
2171                    },
2172                }
2173            }
2174        });
2175        hrtimer
2176    }
2177
2178    fn clone_utc_clock(orig: &fxr::UtcClock) -> fxr::UtcClock {
2179        orig.duplicate_handle(zx::Rights::SAME_RIGHTS).unwrap()
2180    }
2181
    /// Shared fixture for the wake-alarm tests.
    ///
    /// Holds the client-side proxy to the alarm service under test, plus the
    /// handles that keep the fake hrtimer device and the serving task alive
    /// for the duration of a test.
    struct TestContext {
        // Client connection to the wake alarms FIDL service under test.
        wake_proxy: fta::WakeAlarmsProxy,
        // Owns the async tasks spawned in `new()` (fake hrtimer loop and the
        // `serve` task); dropping it tears them down at end of test.
        _scope: fasync::Scope,
        // Kept so the command channel to the fake hrtimer stays open for the
        // test's lifetime; not used after setup.
        _cmd_tx: mpsc::Sender<FakeCmd>,
        // Use to manipulate the UTC clock from the test.
        utc_clock: fxr::UtcClock,
        // The backstop instant `utc_clock` was created with; tests use it as
        // the reference point for UTC deadlines.
        utc_backstop: fxr::UtcInstant,
    }
2190
    impl TestContext {
        /// Builds the complete test harness: fake time pinned at 0, a fake
        /// hrtimer device, a UTC test clock, and a served
        /// `fuchsia.time.alarms` connection reachable through `wake_proxy`.
        async fn new() -> Self {
            // Pin fake time to a known origin so deadlines in tests can be
            // expressed as absolute nanosecond values.
            TestExecutor::advance_to(fasync::MonotonicInstant::from_nanos(0)).await;

            let scope = fasync::Scope::new();
            let utc_backstop = fxr::UtcInstant::from_nanos(1000);
            let utc_clock =
                fxr::UtcClock::create(zx::ClockOpts::empty(), Some(utc_backstop)).unwrap();
            // The alarm loop gets its own handle to the same clock object, so
            // the test can update the clock and the loop sees the change.
            let utc_clone = clone_utc_clock(&utc_clock);
            let (mut cmd_tx, wake_proxy) = {
                // `tx` lets the test send commands to the fake hrtimer device.
                let (tx, rx) = mpsc::channel::<FakeCmd>(0);
                let hrtimer_proxy = fake_hrtimer_connection(scope.to_handle(), rx);

                let inspector = finspect::component::inspector();
                let alarms = Rc::new(Loop::new(
                    scope.to_handle(),
                    hrtimer_proxy,
                    inspector.root().create_child("test"),
                    utc_clone,
                ));

                // Serve the WakeAlarms protocol on the test scope; the proxy
                // is what individual tests use to schedule alarms.
                let (proxy, stream) =
                    fidl::endpoints::create_proxy_and_stream::<fta::WakeAlarmsMarker>();
                scope.spawn_local(async move {
                    serve(alarms, stream).await;
                });
                (tx, proxy)
            };

            // Configure the fake hrtimer: 1ns resolution, bounded tick count,
            // and a fake wake-lease peer handle.
            let (_wake_lease, peer) = zx::EventPair::create();
            let done = zx::Event::create();
            cmd_tx
                .start_send(FakeCmd::SetProperties {
                    resolutions: vec![zx::Duration::from_nanos(1)],
                    max_ticks: 100,
                    keep_alive: peer,
                    done: done.duplicate_handle(zx::Rights::SAME_RIGHTS).unwrap(),
                })
                .unwrap();

            // Wait until hrtimer configuration has completed.
            assert_matches!(fasync::OnSignals::new(done, zx::Signals::EVENT_SIGNALED).await, Ok(_));

            Self { wake_proxy, _scope: scope, _cmd_tx: cmd_tx, utc_clock, utc_backstop }
        }
    }
2237
    impl Drop for TestContext {
        fn drop(&mut self) {
            // Every test must consume all the fake-time timers it creates; a
            // leftover timer at teardown means an alarm was scheduled but
            // never fired nor canceled.
            assert_matches!(TestExecutor::next_timer(), None, "Unexpected lingering timers");
        }
    }
2243
2244    #[fuchsia::test(allow_stalls = false)]
2245    async fn test_basic_timed_wait() {
2246        let ctx = TestContext::new().await;
2247
2248        let deadline = zx::BootInstant::from_nanos(100);
2249        let setup_done = zx::Event::create();
2250        let mut set_task = ctx.wake_proxy.set_and_wait(
2251            deadline.into(),
2252            fta::SetMode::NotifySetupDone(
2253                setup_done.duplicate_handle(zx::Rights::SAME_RIGHTS).unwrap(),
2254            ),
2255            "Hello".into(),
2256        );
2257
2258        assert_matches!(TestExecutor::poll_until_stalled(&mut set_task).await, Poll::Pending);
2259
2260        let mut setup_done_task =
2261            pin!(fasync::OnSignals::new(setup_done, zx::Signals::EVENT_SIGNALED));
2262        assert_matches!(
2263            TestExecutor::poll_until_stalled(&mut setup_done_task).await,
2264            Poll::Ready(Ok(_)),
2265            "Setup event not triggered after scheduling an alarm"
2266        );
2267
2268        TestExecutor::advance_to(fasync::MonotonicInstant::from_nanos(100)).await;
2269        assert_matches!(TestExecutor::poll_until_stalled(set_task).await, Poll::Ready(Ok(Ok(_))));
2270    }
2271
    /// Schedules an alarm via the notifier-based `set` API and verifies that
    /// the notification arrives exactly at the deadline and not earlier.
    #[fuchsia::test(allow_stalls = false)]
    async fn test_basic_timed_wait_notify() {
        const ALARM_ID: &str = "Hello";
        let ctx = TestContext::new().await;

        let (notifier_client, mut notifier_stream) =
            fidl::endpoints::create_request_stream::<fta::NotifierMarker>();
        let setup_done = zx::Event::create();
        // `set` returns once the alarm is accepted; the expiration itself is
        // delivered later on the notifier stream.
        assert_matches!(
            ctx.wake_proxy
                .set(
                    notifier_client,
                    fidl::BootInstant::from_nanos(2),
                    fta::SetMode::NotifySetupDone(
                        setup_done.duplicate_handle(zx::Rights::SAME_RIGHTS).unwrap()
                    ),
                    ALARM_ID,
                )
                .await,
            Ok(Ok(()))
        );

        // Scheduling completion is signaled through the setup event.
        let mut done_task = pin!(fasync::OnSignals::new(setup_done, zx::Signals::EVENT_SIGNALED));
        assert_matches!(
            TestExecutor::poll_until_stalled(&mut done_task).await,
            Poll::Ready(Ok(_)),
            "Setup event not triggered after scheduling an alarm"
        );

        // No notification before the deadline...
        let mut next_task = notifier_stream.next();
        assert_matches!(TestExecutor::poll_until_stalled(&mut next_task).await, Poll::Pending);

        // ...not even one nanosecond before it...
        TestExecutor::advance_to(fasync::MonotonicInstant::from_nanos(1)).await;
        assert_matches!(TestExecutor::poll_until_stalled(&mut next_task).await, Poll::Pending);

        // ...but exactly at the 2ns deadline the notifier fires with our ID.
        TestExecutor::advance_to(fasync::MonotonicInstant::from_nanos(2)).await;
        assert_matches!(
            TestExecutor::poll_until_stalled(next_task).await,
            Poll::Ready(Some(Ok(fta::NotifierRequest::Notify { alarm_id, .. }))) if alarm_id == ALARM_ID
        );
    }
2313
2314    #[fuchsia::test(allow_stalls = false)]
2315    async fn test_two_alarms_same() {
2316        const DEADLINE_NANOS: i64 = 100;
2317
2318        let ctx = TestContext::new().await;
2319
2320        let mut set_task_1 = ctx.wake_proxy.set_and_wait(
2321            fidl::BootInstant::from_nanos(DEADLINE_NANOS),
2322            fta::SetMode::KeepAlive(fake_wake_lease()),
2323            "Hello1".into(),
2324        );
2325        let mut set_task_2 = ctx.wake_proxy.set_and_wait(
2326            fidl::BootInstant::from_nanos(DEADLINE_NANOS),
2327            fta::SetMode::KeepAlive(fake_wake_lease()),
2328            "Hello2".into(),
2329        );
2330
2331        assert_matches!(TestExecutor::poll_until_stalled(&mut set_task_1).await, Poll::Pending);
2332        assert_matches!(TestExecutor::poll_until_stalled(&mut set_task_2).await, Poll::Pending);
2333
2334        TestExecutor::advance_to(fasync::MonotonicInstant::from_nanos(DEADLINE_NANOS)).await;
2335
2336        assert_matches!(
2337            TestExecutor::poll_until_stalled(&mut set_task_1).await,
2338            Poll::Ready(Ok(Ok(_)))
2339        );
2340        assert_matches!(
2341            TestExecutor::poll_until_stalled(&mut set_task_2).await,
2342            Poll::Ready(Ok(Ok(_)))
2343        );
2344    }
2345
    /// Two alarms with the same deadline, scheduled via the notifier-based
    /// API, must both be notified when the shared deadline is reached, after
    /// which each notifier stream is closed.
    #[fuchsia::test(allow_stalls = false)]
    async fn test_two_alarms_same_notify() {
        const DEADLINE_NANOS: i64 = 100;
        const ALARM_ID_1: &str = "Hello1";
        const ALARM_ID_2: &str = "Hello2";

        let ctx = TestContext::new().await;

        // Schedules one alarm and returns the stream on which its
        // notification will arrive.
        let schedule = async |deadline_nanos: i64, alarm_id: &str| {
            let (notifier_client, notifier_stream) =
                fidl::endpoints::create_request_stream::<fta::NotifierMarker>();
            assert_matches!(
                ctx.wake_proxy
                    .set(
                        notifier_client,
                        fidl::BootInstant::from_nanos(deadline_nanos),
                        fta::SetMode::KeepAlive(fake_wake_lease()),
                        alarm_id,
                    )
                    .await,
                Ok(Ok(()))
            );
            notifier_stream
        };

        let mut notifier_1 = schedule(DEADLINE_NANOS, ALARM_ID_1).await;
        let mut notifier_2 = schedule(DEADLINE_NANOS, ALARM_ID_2).await;

        let mut next_task_1 = notifier_1.next();
        let mut next_task_2 = notifier_2.next();

        // Before the deadline, neither notifier has a message.
        assert_matches!(TestExecutor::poll_until_stalled(&mut next_task_1).await, Poll::Pending);
        assert_matches!(TestExecutor::poll_until_stalled(&mut next_task_2).await, Poll::Pending);

        TestExecutor::advance_to(fasync::MonotonicInstant::from_nanos(DEADLINE_NANOS)).await;

        // At the deadline both alarms notify, each carrying its own ID.
        assert_matches!(
            TestExecutor::poll_until_stalled(&mut next_task_1).await,
            Poll::Ready(Some(Ok(fta::NotifierRequest::Notify { alarm_id, .. }))) if alarm_id == ALARM_ID_1
        );
        assert_matches!(
            TestExecutor::poll_until_stalled(&mut next_task_2).await,
            Poll::Ready(Some(Ok(fta::NotifierRequest::Notify { alarm_id, .. }))) if alarm_id == ALARM_ID_2
        );

        // Each notifier stream is closed after its single notification.
        assert_matches!(
            TestExecutor::poll_until_stalled(notifier_1.next()).await,
            Poll::Ready(None)
        );
        assert_matches!(
            TestExecutor::poll_until_stalled(notifier_2.next()).await,
            Poll::Ready(None)
        );
    }
2400
2401    #[test_case(100, 200 ; "push out")]
2402    #[test_case(200, 100 ; "pull in")]
2403    #[fuchsia::test(allow_stalls = false)]
2404    async fn test_two_alarms_different(
2405        // One timer scheduled at this instant (fake time starts from zero).
2406        first_deadline_nanos: i64,
2407        // Another timer scheduled at this instant.
2408        second_deadline_nanos: i64,
2409    ) {
2410        let ctx = TestContext::new().await;
2411
2412        let mut set_task_1 = ctx.wake_proxy.set_and_wait(
2413            fidl::BootInstant::from_nanos(first_deadline_nanos),
2414            fta::SetMode::KeepAlive(fake_wake_lease()),
2415            "Hello1".into(),
2416        );
2417        let mut set_task_2 = ctx.wake_proxy.set_and_wait(
2418            fidl::BootInstant::from_nanos(second_deadline_nanos),
2419            fta::SetMode::KeepAlive(fake_wake_lease()),
2420            "Hello2".into(),
2421        );
2422
2423        assert_matches!(TestExecutor::poll_until_stalled(&mut set_task_1).await, Poll::Pending);
2424        assert_matches!(TestExecutor::poll_until_stalled(&mut set_task_2).await, Poll::Pending);
2425
2426        // Sort alarms by their deadlines.
2427        let mut tasks = [(first_deadline_nanos, set_task_1), (second_deadline_nanos, set_task_2)];
2428        tasks.sort_by(|a, b| a.0.cmp(&b.0));
2429        let [mut first_task, mut second_task] = tasks;
2430
2431        // Alarms should fire in order of deadlines.
2432        TestExecutor::advance_to(fasync::MonotonicInstant::from_nanos(first_task.0)).await;
2433        assert_matches!(
2434            TestExecutor::poll_until_stalled(&mut first_task.1).await,
2435            Poll::Ready(Ok(Ok(_)))
2436        );
2437        assert_matches!(TestExecutor::poll_until_stalled(&mut second_task.1).await, Poll::Pending);
2438
2439        TestExecutor::advance_to(fasync::MonotonicInstant::from_nanos(second_task.0)).await;
2440        assert_matches!(
2441            TestExecutor::poll_until_stalled(&mut second_task.1).await,
2442            Poll::Ready(Ok(Ok(_)))
2443        );
2444    }
2445
    /// Notifier-API variant of `test_two_alarms_different`: two alarms with
    /// distinct deadlines must notify strictly in deadline order, and each
    /// notifier stream closes after delivering its notification.
    #[test_case(100, 200 ; "push out")]
    #[test_case(200, 100 ; "pull in")]
    #[fuchsia::test(allow_stalls = false)]
    async fn test_two_alarms_different_notify(
        // One timer scheduled at this instant (fake time starts from zero).
        first_deadline_nanos: i64,
        // Another timer scheduled at this instant.
        second_deadline_nanos: i64,
    ) {
        const ALARM_ID_1: &str = "Hello1";
        const ALARM_ID_2: &str = "Hello2";

        let ctx = TestContext::new().await;

        // Schedules one alarm and returns the stream on which its
        // notification will arrive.
        let schedule = async |deadline_nanos: i64, alarm_id: &str| {
            let (notifier_client, notifier_stream) =
                fidl::endpoints::create_request_stream::<fta::NotifierMarker>();
            assert_matches!(
                ctx.wake_proxy
                    .set(
                        notifier_client,
                        fidl::BootInstant::from_nanos(deadline_nanos),
                        fta::SetMode::KeepAlive(fake_wake_lease()),
                        alarm_id,
                    )
                    .await,
                Ok(Ok(()))
            );
            notifier_stream
        };

        // Merge both notifier streams so notifications can be awaited in
        // whichever order they arrive, then compute the expected order by
        // sorting the (deadline, id) pairs.
        let mut notifier_all = futures::stream::select_all([
            schedule(first_deadline_nanos, ALARM_ID_1).await,
            schedule(second_deadline_nanos, ALARM_ID_2).await,
        ]);
        let [(early_ns, early_alarm), (later_ns, later_alarm)] = {
            let mut tasks =
                [(first_deadline_nanos, ALARM_ID_1), (second_deadline_nanos, ALARM_ID_2)];
            tasks.sort_by(|a, b| a.0.cmp(&b.0));
            tasks
        };

        // Alarms should fire in order of deadlines.
        let mut next_task = notifier_all.next();
        assert_matches!(TestExecutor::poll_until_stalled(&mut next_task).await, Poll::Pending);

        // First the alarm with the earlier deadline...
        TestExecutor::advance_to(fasync::MonotonicInstant::from_nanos(early_ns)).await;
        assert_matches!(
            TestExecutor::poll_until_stalled(next_task).await,
            Poll::Ready(Some(Ok(fta::NotifierRequest::Notify { alarm_id, .. }))) if alarm_id == early_alarm
        );

        let mut next_task = notifier_all.next();
        assert_matches!(TestExecutor::poll_until_stalled(&mut next_task).await, Poll::Pending);

        // ...then the one with the later deadline.
        TestExecutor::advance_to(fasync::MonotonicInstant::from_nanos(later_ns)).await;
        assert_matches!(
            TestExecutor::poll_until_stalled(next_task).await,
            Poll::Ready(Some(Ok(fta::NotifierRequest::Notify { alarm_id, .. }))) if alarm_id == later_alarm
        );
        // Both notifier streams are closed once each has delivered its
        // notification, so the merged stream terminates.
        assert_matches!(
            TestExecutor::poll_until_stalled(notifier_all.next()).await,
            Poll::Ready(None)
        );
    }
2512
2513    #[fuchsia::test(allow_stalls = false)]
2514    async fn test_alarm_immediate() {
2515        let ctx = TestContext::new().await;
2516        let mut set_task = ctx.wake_proxy.set_and_wait(
2517            fidl::BootInstant::INFINITE_PAST,
2518            fta::SetMode::KeepAlive(fake_wake_lease()),
2519            "Hello1".into(),
2520        );
2521        assert_matches!(
2522            TestExecutor::poll_until_stalled(&mut set_task).await,
2523            Poll::Ready(Ok(Ok(_)))
2524        );
2525    }
2526
2527    #[fuchsia::test(allow_stalls = false)]
2528    async fn test_alarm_immediate_notify() {
2529        const ALARM_ID: &str = "Hello";
2530        let ctx = TestContext::new().await;
2531
2532        let (notifier_client, mut notifier_stream) =
2533            fidl::endpoints::create_request_stream::<fta::NotifierMarker>();
2534
2535        let mut set_task = ctx.wake_proxy.set(
2536            notifier_client,
2537            fidl::BootInstant::INFINITE_PAST,
2538            fta::SetMode::KeepAlive(fake_wake_lease()),
2539            ALARM_ID,
2540        );
2541        assert_matches!(
2542            TestExecutor::poll_until_stalled(&mut set_task).await,
2543            Poll::Ready(Ok(Ok(_)))
2544        );
2545        assert_matches!(
2546            TestExecutor::poll_until_stalled(notifier_stream.next()).await,
2547            Poll::Ready(Some(Ok(fta::NotifierRequest::Notify { alarm_id, .. }))) if alarm_id == ALARM_ID
2548        );
2549    }
2550
    // Rescheduling a timer (same alarm ID) cancels the earlier call — which
    // resolves with `Dropped` — and the replacement call uses the new
    // deadline, whether it is earlier, later, or identical.
    #[test_case(200, 100 ; "pull in")]
    #[test_case(100, 200 ; "push out")]
    #[test_case(100, 100 ; "replace with the same deadline")]
    #[fuchsia::test(allow_stalls = false)]
    async fn test_reschedule(initial_deadline_nanos: i64, override_deadline_nanos: i64) {
        const ALARM_ID: &str = "Hello";

        let ctx = TestContext::new().await;

        // Schedules the alarm named ALARM_ID at `deadline_nanos`, returning
        // the in-flight call and the event signaled once setup completes.
        let schedule = |deadline_nanos: i64| {
            let setup_done = zx::Event::create();
            let task = ctx.wake_proxy.set_and_wait(
                fidl::BootInstant::from_nanos(deadline_nanos),
                fta::SetMode::NotifySetupDone(
                    setup_done.duplicate_handle(zx::Rights::SAME_RIGHTS).unwrap(),
                ),
                ALARM_ID.into(),
            );
            (task, setup_done)
        };

        // Schedule the timer once, wait until it is set up, and confirm it
        // has not fired yet.
        let (mut set_task_1, setup_done_1) = schedule(initial_deadline_nanos);
        fasync::OnSignals::new(setup_done_1, zx::Signals::EVENT_SIGNALED).await.unwrap();
        assert_matches!(TestExecutor::poll_until_stalled(&mut set_task_1).await, Poll::Pending);

        // Schedule the same alarm ID again with the overriding deadline.
        // This should cancel the earlier call.
        let (mut set_task_2, setup_done_2) = schedule(override_deadline_nanos);
        fasync::OnSignals::new(setup_done_2, zx::Signals::EVENT_SIGNALED).await.unwrap();
        // The superseded call resolves with a `Dropped` error...
        assert_matches!(
            TestExecutor::poll_until_stalled(&mut set_task_1).await,
            Poll::Ready(Ok(Err(fta::WakeAlarmsError::Dropped)))
        );
        // ...while the replacement remains pending.
        assert_matches!(TestExecutor::poll_until_stalled(&mut set_task_2).await, Poll::Pending);

        // The later call fires exactly at the overriding deadline: still
        // pending one nanosecond before it, ready at it.
        TestExecutor::advance_to(fasync::MonotonicInstant::from_nanos(override_deadline_nanos - 1))
            .await;
        assert_matches!(TestExecutor::poll_until_stalled(&mut set_task_2).await, Poll::Pending);
        TestExecutor::advance_to(fasync::MonotonicInstant::from_nanos(override_deadline_nanos))
            .await;
        assert_matches!(
            TestExecutor::poll_until_stalled(&mut set_task_2).await,
            Poll::Ready(Ok(Ok(_)))
        );

        // The values in the inspector tree are fixed because the test
        // runs fully deterministically in fake time.
        assert_data_tree!(finspect::component::inspector(), root: {
            test: {
                hardware: {
                    // All alarms fired, so this should be "none".
                    current_deadline: "(none)",
                    remaining_until_alarm: "(none)",
                },
                now_formatted: format!("{override_deadline_nanos}ns ({override_deadline_nanos})"),
                now_ns: override_deadline_nanos,
                pending_timers: "Boot:\n\t\n\tUTC:\n\t",
                pending_timers_count: 0u64,
                requested_deadlines_ns: AnyProperty,
                schedule_delay_ns: AnyProperty,
                slack_ns: AnyProperty,
                boot_deadlines_count: AnyProperty,
                utc_deadlines_count: AnyProperty,
                debug_node: contains {},
            },
        });
    }
2623
    // Rescheduling a timer will send an error on the old notifier and use the
    // new notifier for the new deadline.
    #[fuchsia::test(allow_stalls = false)]
    async fn test_reschedule_notify() {
        const ALARM_ID: &str = "Hello";
        const INITIAL_DEADLINE_NANOS: i64 = 100;
        const OVERRIDE_DEADLINE_NANOS: i64 = 200;

        let ctx = TestContext::new().await;

        // Schedules the alarm named ALARM_ID at `deadline_nanos` with a fresh
        // notifier, and returns that notifier's request stream.
        let schedule = async |deadline_nanos: i64| {
            let (notifier_client, notifier_stream) =
                fidl::endpoints::create_request_stream::<fta::NotifierMarker>();
            assert_matches!(
                ctx.wake_proxy
                    .set(
                        notifier_client,
                        fidl::BootInstant::from_nanos(deadline_nanos),
                        fta::SetMode::KeepAlive(fake_wake_lease()),
                        ALARM_ID.into(),
                    )
                    .await,
                Ok(Ok(()))
            );
            notifier_stream
        };

        // First scheduling: nothing delivered before the deadline.
        let mut notifier_1 = schedule(INITIAL_DEADLINE_NANOS).await;
        let mut next_task_1 = notifier_1.next();
        assert_matches!(TestExecutor::poll_until_stalled(&mut next_task_1).await, Poll::Pending);

        // Reschedule the same alarm ID with a new deadline and notifier.
        let mut notifier_2 = schedule(OVERRIDE_DEADLINE_NANOS).await;
        let mut next_task_2 = notifier_2.next();
        assert_matches!(TestExecutor::poll_until_stalled(&mut next_task_2).await, Poll::Pending);

        // First notifier is called with an error then closed.
        assert_matches!(
            TestExecutor::poll_until_stalled(&mut next_task_1).await,
            Poll::Ready(Some(Ok(fta::NotifierRequest::NotifyError { alarm_id, error, .. }))) if alarm_id == ALARM_ID && error == fta::WakeAlarmsError::Dropped
        );
        assert_matches!(
            TestExecutor::poll_until_stalled(notifier_1.next()).await,
            Poll::Ready(None)
        );

        // Second notifier is called upon the new deadline then closed.
        TestExecutor::advance_to(fasync::MonotonicInstant::from_nanos(OVERRIDE_DEADLINE_NANOS))
            .await;
        assert_matches!(
            TestExecutor::poll_until_stalled(next_task_2).await,
            Poll::Ready(Some(Ok(fta::NotifierRequest::Notify { alarm_id, .. }))) if alarm_id == ALARM_ID
        );
        assert_matches!(
            TestExecutor::poll_until_stalled(notifier_2.next()).await,
            Poll::Ready(None)
        );
    }
2681
2682    // If we get two scheduling FIDL errors one after another, the wake alarm
2683    // manager must not lock up.
2684    #[fuchsia::test(allow_stalls = false)]
2685    async fn test_fidl_error_on_reschedule() {
2686        const DEADLINE_NANOS: i64 = 100;
2687
2688        let (wake_proxy, _stream) =
2689            fidl::endpoints::create_proxy_and_stream::<fta::WakeAlarmsMarker>();
2690        drop(_stream);
2691
2692        assert_matches!(
2693            wake_proxy
2694                .set_and_wait(
2695                    zx::BootInstant::from_nanos(DEADLINE_NANOS).into(),
2696                    fta::SetMode::KeepAlive(fake_wake_lease()),
2697                    "hello1".into(),
2698                )
2699                .await,
2700            Err(fidl::Error::ClientChannelClosed { .. })
2701        );
2702
2703        assert_matches!(
2704            wake_proxy
2705                .set_and_wait(
2706                    zx::BootInstant::from_nanos(DEADLINE_NANOS).into(),
2707                    fta::SetMode::KeepAlive(fake_wake_lease()),
2708                    "hello2".into(),
2709                )
2710                .await,
2711            Err(fidl::Error::ClientChannelClosed { .. })
2712        );
2713    }
2714
    // Verify that if a UTC timer is scheduled in the future on the UTC timeline, then the
    // UTC clock is changed to move "now" beyond the timer's deadline, the timer fires.
    #[fuchsia::test(allow_stalls = false)]
    async fn test_set_and_wait_utc() {
        const ALARM_ID: &str = "Hello_set_and_wait_utc";
        let ctx = TestContext::new().await;

        // Start the UTC clock: map the current boot instant to the clock's
        // backstop, so UTC "now" equals the backstop.
        let now_boot = fasync::BootInstant::now();
        ctx.utc_clock
            .update(
                zx::ClockUpdate::builder()
                    .absolute_value(now_boot.into(), ctx.utc_backstop)
                    .build(),
            )
            .unwrap();

        // Schedule a UTC alarm 2ns past the backstop, i.e. in the UTC future.
        let timestamp_utc = ctx.utc_backstop + fxr::UtcDuration::from_nanos(2);
        let mut wake_fut = ctx.wake_proxy.set_and_wait_utc(
            &fta::InstantUtc { timestamp_utc: timestamp_utc.into_nanos() },
            fta::SetMode::KeepAlive(fake_wake_lease()),
            ALARM_ID,
        );

        // Timer is not expired yet.
        assert_matches!(TestExecutor::poll_until_stalled(&mut wake_fut).await, Poll::Pending);

        // Move the UTC timeline: jump UTC "now" to backstop+100ns — beyond
        // the alarm's backstop+2ns deadline — without advancing boot time.
        ctx.utc_clock
            .update(
                zx::ClockUpdate::builder()
                    .absolute_value(
                        now_boot.into(),
                        ctx.utc_backstop + fxr::UtcDuration::from_nanos(100),
                    )
                    .build(),
            )
            .unwrap();

        // Advance fake time by 1ns so pending async work can run after the
        // clock jump (same pattern as the boot-timeline tests above); the
        // alarm must then complete.
        TestExecutor::advance_to(fasync::MonotonicInstant::from_nanos(1)).await;
        assert_matches!(TestExecutor::poll_until_stalled(wake_fut).await, Poll::Ready(_));
    }
2757}