alarms/lib.rs

1// Copyright 2024 The Fuchsia Authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
5//! Alarm management subsystem.
6//!
//! This subsystem serves the FIDL API `fuchsia.time.alarms/WakeAlarms`. To
//! instantiate it, you can use the following approach:
9//!
//! ```ignore
//! // `scope`, `inspect_node` and `utc_clock` are assumed to be available.
//! let proxy = client::connect_to_protocol::<ffhh::DeviceMarker>()
//!     .map_err(|e| error!("error: {}", e))
//!     .expect("add proper error handling");
//! let timer_loop = Rc::new(alarms::Loop::new(scope, proxy, inspect_node, utc_clock));
//! ```
15//!
16//! From here, use the standard approach with [ServiceFs::new] to expose the
17//! discoverable FIDL endpoint and call:
18//!
19//! ```ignore
//! let stream: fidl_fuchsia_time_alarms::WakeAlarmsRequestStream = ... ;
21//! alarms::serve(timer_loop, stream).await;
22//! // ...
23//! ```
24//!
25//! Of course, for everything to work well, your component will need appropriate
26//! capability routing.  Refer to capability routing docs for those details.
27
28mod emu;
29mod timers;
30
31use crate::emu::EmulationTimerOps;
32use anyhow::{Context, Result, anyhow};
33use async_trait::async_trait;
34use fidl::HandleBased;
35use fidl::encoding::ProxyChannelBox;
36use fidl::endpoints::RequestStream;
37use fuchsia_component::client::connect_to_named_protocol_at_dir_root;
38use fuchsia_inspect::{IntProperty, NumericProperty, Property};
39use futures::channel::mpsc;
40use futures::sink::SinkExt;
41use futures::{StreamExt, TryStreamExt};
42use log::{debug, error, warn};
43use scopeguard::defer;
44use std::cell::RefCell;
45use std::rc::Rc;
46use std::sync::LazyLock;
47use time_pretty::{MSEC_IN_NANOS, format_duration, format_timer};
48use zx::AsHandleRef;
49use {
50    fidl_fuchsia_hardware_hrtimer as ffhh, fidl_fuchsia_time_alarms as fta,
51    fuchsia_async as fasync, fuchsia_inspect as finspect, fuchsia_runtime as fxr,
52    fuchsia_trace as trace,
53};
54
55static I64_MAX_AS_U64: LazyLock<u64> = LazyLock::new(|| i64::MAX.try_into().expect("infallible"));
56static I32_MAX_AS_U64: LazyLock<u64> = LazyLock::new(|| i32::MAX.try_into().expect("infallible"));
57
58/// The largest value of timer "ticks" that is still considered useful.
59static MAX_USEFUL_TICKS: LazyLock<u64> = LazyLock::new(|| *I32_MAX_AS_U64);
60
/// The smallest value of "ticks" that we can program into the driver. The
/// driver rejects "0" ticks, even though it probably shouldn't. See
/// b/437177931 for details.
64static MIN_USEFUL_TICKS: u64 = 1;
65
66/// The hrtimer ID used for scheduling wake alarms.  This ID is reused from
67/// Starnix, and should eventually no longer be critical.
68const MAIN_TIMER_ID: usize = 6;
69
70/// This is what we consider a "long" delay in alarm operations.
71const LONG_DELAY_NANOS: i64 = 2000 * MSEC_IN_NANOS;
72
/// Awaits a future; if the future takes longer than 30 seconds to complete,
/// logs a warning (and, on Fuchsia targets, requests a backtrace of all
/// threads), repeating every 30 seconds until the future resolves.
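///
/// A minimal usage sketch, mirroring how this module uses it:
///
/// ```ignore
/// // Resolves to the wrapped future's output; warnings are only logged if the
/// // await takes longer than 30 seconds.
/// let properties = log_long_op!(proxy.get_properties());
/// ```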
75macro_rules! log_long_op {
76    ($fut:expr) => {{
77        use futures::FutureExt;
78        let fut = $fut;
79        futures::pin_mut!(fut);
80        loop {
81            let timeout = fasync::Timer::new(std::time::Duration::from_secs(30));
82            futures::select! {
83                res = fut.as_mut().fuse() => {
84                    break res;
85                }
86                _ = timeout.fuse() => {
87                    warn!("unexpected blocking: long-running async operation at {}:{}", file!(), line!());
88                    #[cfg(all(target_os = "fuchsia", not(doc)))]
89                    ::debug::backtrace_request_all_threads();
90                }
91            }
92        }
93    }};
94}
95
/// Increments the value of an underlying inspect property while this guard is
/// alive, decrementing it again on drop.
97struct ScopedInc<'a> {
98    property: &'a IntProperty,
99}
100
101impl<'a> ScopedInc<'a> {
102    fn new(property: &'a IntProperty) -> Self {
103        property.add(1);
104        Self { property }
105    }
106}
107
108impl<'a> Drop for ScopedInc<'a> {
109    fn drop(&mut self) {
110        self.property.add(-1);
111    }
112}
113
/// Compares two optional deadlines and returns true if `before` differs from `after`.
/// Two `None`s compare as equal.
116fn is_deadline_changed(
117    before: Option<fasync::BootInstant>,
118    after: Option<fasync::BootInstant>,
119) -> bool {
120    match (before, after) {
121        (None, None) => false,
122        (None, Some(_)) | (Some(_), None) => true,
123        (Some(before), Some(after)) => before != after,
124    }
125}
126
/// Errors returnable from [TimerOps] calls.
128#[derive(Debug, Clone)]
129pub(crate) enum TimerOpsError {
130    /// The driver reported an error.
131    Driver(ffhh::DriverError),
132    /// FIDL-specific RPC error.
133    Fidl(fidl::Error),
134}
135
impl From<TimerOpsError> for fta::WakeAlarmsError {
    /// Compute the error that gets propagated to callers, depending on messages
    /// from the driver.
    fn from(error: TimerOpsError) -> Self {
        match error {
            TimerOpsError::Fidl(fidl::Error::ClientChannelClosed { .. }) => {
                fta::WakeAlarmsError::DriverConnection
            }
            TimerOpsError::Driver(ffhh::DriverError::InternalError) => fta::WakeAlarmsError::Driver,
            _ => fta::WakeAlarmsError::Internal,
        }
    }
}
149
150impl TimerOpsError {
151    fn is_canceled(&self) -> bool {
152        match self {
153            TimerOpsError::Driver(ffhh::DriverError::Canceled) => true,
154            _ => false,
155        }
156    }
157}
158
159trait SawResponseFut: std::future::Future<Output = Result<zx::EventPair, TimerOpsError>> {
160    // nop
161}
162
163/// Abstracts away timer operations.
164#[async_trait(?Send)]
165pub(crate) trait TimerOps {
166    /// Stop the timer with the specified ID.
167    async fn stop(&self, id: u64);
168
169    /// Examine the timer's properties, such as supported resolutions and tick
170    /// counts.
171    async fn get_timer_properties(&self) -> TimerConfig;
172
    /// This method must return a concrete future rather than being declared
    /// `async`: an `async fn` would keep `self` borrowed for the lifetime of
    /// the returned future, which thwarts attempts to move that future into a
    /// separate closure.
177    fn start_and_wait(
178        &self,
179        id: u64,
180        resolution: &ffhh::Resolution,
181        ticks: u64,
182        setup_event: zx::Event,
183    ) -> std::pin::Pin<Box<dyn SawResponseFut>>;
184}
185
186/// TimerOps backed by an actual hardware timer.
187struct HardwareTimerOps {
188    proxy: ffhh::DeviceProxy,
189}
190
191impl HardwareTimerOps {
192    fn new(proxy: ffhh::DeviceProxy) -> Box<Self> {
193        Box::new(Self { proxy })
194    }
195}
196
197#[async_trait(?Send)]
198impl TimerOps for HardwareTimerOps {
199    async fn stop(&self, id: u64) {
200        let _ = self
201            .proxy
202            .stop(id)
203            .await
204            .map(|result| {
205                let _ = result.map_err(|e| warn!("stop_hrtimer: driver error: {:?}", e));
206            })
207            .map_err(|e| warn!("stop_hrtimer: could not stop prior timer: {}", e));
208    }
209
210    async fn get_timer_properties(&self) -> TimerConfig {
211        match log_long_op!(self.proxy.get_properties()) {
212            Ok(p) => {
213                let timers_properties = &p.timers_properties.expect("timers_properties must exist");
214                debug!("get_timer_properties: got: {:?}", timers_properties);
215
216                // Pick the correct hrtimer to use for wakes.
217                let timer_index = if timers_properties.len() > MAIN_TIMER_ID {
218                    // Mostly vim3, where we have pre-existing timer allocations
219                    // that we don't need to change.
220                    MAIN_TIMER_ID
221                } else if timers_properties.len() > 0 {
222                    // Newer devices that don't need to allocate timer IDs, and/or
223                    // may not even have as many timers as vim3 does. But, at least
224                    // one timer is needed.
225                    0
226                } else {
227                    // Give up.
228                    return TimerConfig::new_empty();
229                };
230                let main_timer_properties = &timers_properties[timer_index];
231                debug!("alarms: main_timer_properties: {:?}", main_timer_properties);
232                // Not sure whether it is useful to have more ticks than this, so limit it.
233                let max_ticks: u64 = std::cmp::min(
234                    main_timer_properties.max_ticks.unwrap_or(*MAX_USEFUL_TICKS),
235                    *MAX_USEFUL_TICKS,
236                );
237                let resolutions = &main_timer_properties
238                    .supported_resolutions
239                    .as_ref()
240                    .expect("supported_resolutions is populated")
241                    .iter()
242                    .last() //  Limits the resolution to the coarsest available.
243                    .map(|r| match *r {
244                        ffhh::Resolution::Duration(d) => d,
245                        _ => {
246                            error!(
247                            "get_timer_properties: Unknown resolution type, returning millisecond."
248                        );
249                            MSEC_IN_NANOS
250                        }
251                    })
252                    .map(|d| zx::BootDuration::from_nanos(d))
253                    .into_iter() // Used with .last() above.
254                    .collect::<Vec<_>>();
255                let timer_id = main_timer_properties.id.expect("timer ID is always present");
256                TimerConfig::new_from_data(timer_id, resolutions, max_ticks)
257            }
258            Err(e) => {
259                error!("could not get timer properties: {:?}", e);
260                TimerConfig::new_empty()
261            }
262        }
263    }
264
265    fn start_and_wait(
266        &self,
267        id: u64,
268        resolution: &ffhh::Resolution,
269        ticks: u64,
270        setup_event: zx::Event,
271    ) -> std::pin::Pin<Box<dyn SawResponseFut>> {
272        let inner = self.proxy.start_and_wait(id, resolution, ticks, setup_event);
273        Box::pin(HwResponseFut { pinner: Box::pin(inner) })
274    }
275}
276
277// Untangles the borrow checker issues that otherwise result from making
278// TimerOps::start_and_wait an async function.
279struct HwResponseFut {
280    pinner: std::pin::Pin<
281        Box<
282            fidl::client::QueryResponseFut<
283                ffhh::DeviceStartAndWaitResult,
284                fidl::encoding::DefaultFuchsiaResourceDialect,
285            >,
286        >,
287    >,
288}
289
290use std::task::Poll;
291impl SawResponseFut for HwResponseFut {}
292impl std::future::Future for HwResponseFut {
293    type Output = Result<zx::EventPair, TimerOpsError>;
294    fn poll(
295        mut self: std::pin::Pin<&mut Self>,
296        cx: &mut std::task::Context<'_>,
297    ) -> std::task::Poll<Self::Output> {
298        let inner_poll = self.pinner.as_mut().poll(cx);
299        match inner_poll {
300            Poll::Ready(result) => Poll::Ready(match result {
301                Ok(Ok(keep_alive)) => Ok(keep_alive),
302                Ok(Err(e)) => Err(TimerOpsError::Driver(e)),
303                Err(e) => Err(TimerOpsError::Fidl(e)),
304            }),
305            Poll::Pending => Poll::Pending,
306        }
307    }
308}
309
310/// Stops a currently running hardware timer.
311async fn stop_hrtimer(hrtimer: &Box<dyn TimerOps>, timer_config: &TimerConfig) {
312    trace::duration!(c"alarms", c"hrtimer:stop", "id" => timer_config.id);
313    debug!("stop_hrtimer: stopping hardware timer: {}", timer_config.id);
314    hrtimer.stop(timer_config.id).await;
315    debug!("stop_hrtimer: stopped  hardware timer: {}", timer_config.id);
316}
317
318// The default size of the channels created in this module.
319// This is very unlikely to create bottlenecks.
320const CHANNEL_SIZE: usize = 1000;
321
322/// A type handed around between the concurrent loops run by this module.
323#[derive(Debug)]
324enum Cmd {
325    /// Request a timer to be started.
326    Start {
327        /// The unique connection ID.
328        conn_id: zx::Koid,
329        /// A timestamp (presumably in the future), at which to expire the timer.
330        deadline: timers::Deadline,
331        // The API supports several modes. See fuchsia.time.alarms/Wake.fidl.
332        //
333        // Optional, because not always needed:
334        //
335        // * `mode` is required for hanging get API calls (e.g. `StartAndWait`), as we must signal
336        //   when the alarm is scheduled.
        // * Calls such as `SetUtc`, which return only once the alarm has been scheduled, do
        //   not need a `mode`: the caller can simply wait for the call to return.
339        mode: Option<fta::SetMode>,
340        /// An alarm identifier, chosen by the caller.
341        alarm_id: String,
342        /// A responder that will be called when the timer expires. The
343        /// client end of the connection will block until we send something
344        /// on this responder.
345        ///
        /// This is packaged into an `Rc` only because both the "happy path"
        /// and the error path must be able to consume the responder.  Sharing
        /// it this way lets either path consume it without the responder
        /// needing to implement `Default`.
        responder: Rc<dyn timers::Responder>,
350    },
351    StopById {
352        done: zx::Event,
353        timer_id: timers::Id,
354    },
355    Alarm {
356        expired_deadline: fasync::BootInstant,
357        keep_alive: fidl::EventPair,
358    },
359    AlarmFidlError {
360        expired_deadline: fasync::BootInstant,
361        error: fidl::Error,
362    },
363    AlarmDriverError {
364        expired_deadline: fasync::BootInstant,
365        error: ffhh::DriverError,
366
367        // Added these for debugging details, otherwise not necessary.
368        timer_config_id: u64,
369        resolution_nanos: i64,
370        ticks: u64,
371    },
372    /// The UTC clock transformation has been updated.
373    UtcUpdated {
374        // The new boot-to-utc clock transformation.
375        transform: fxr::UtcClockTransform,
376    },
377}
378
379impl std::fmt::Display for Cmd {
380    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
381        match self {
382            Cmd::Start { conn_id, deadline, alarm_id, .. } => {
383                write!(
384                    f,
385                    "Start[alarm_id=\"{}\", conn_id={:?}, deadline={}]",
386                    alarm_id, conn_id, deadline,
387                )
388            }
389            Cmd::Alarm { expired_deadline, .. } => {
390                write!(f, "Alarm[deadline={}]", format_timer((*expired_deadline).into()))
391            }
392            Cmd::AlarmFidlError { expired_deadline, error } => {
393                write!(
394                    f,
395                    "FIDLError[deadline={}, err={}, NO_WAKE_LEASE!]",
396                    format_timer((*expired_deadline).into()),
397                    error
398                )
399            }
400            Cmd::AlarmDriverError { expired_deadline, error, .. } => {
401                write!(
402                    f,
403                    "DriverError[deadline={}, err={:?}, NO_WAKE_LEASE!]",
404                    format_timer((*expired_deadline).into()),
405                    error
406                )
407            }
            Cmd::StopById { timer_id, done: _ } => {
                write!(f, "StopById[timerId={}]", timer_id)
            }
            Cmd::UtcUpdated { transform } => {
                write!(f, "UtcUpdated[transform={transform:?}]")
            }
414        }
415    }
416}
417
418/// Extracts a KOID from the underlying channel of the provided stream.
419///
420/// This function deconstructs the provided stream to access the underlying
421/// channel and extract its KOID. It then reconstructs the stream and returns
422/// it to the caller along with the KOID.
423///
424/// # Args
425/// - `stream`: The `fta::WakeAlarmsRequestStream` to extract the KOID from.
426///
427/// # Returns
428/// A tuple containing the `zx::Koid` of the stream's channel and the
429/// reconstructed `fta::WakeAlarmsRequestStream`.
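///
/// # Example
///
/// A usage sketch, mirroring how [serve] uses this function:
///
/// ```ignore
/// // Peel off the KOID for connection bookkeeping, then keep serving the
/// // reconstructed stream as usual.
/// let (conn_id, requests) = get_stream_koid(requests);
/// ```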
430pub fn get_stream_koid(
431    stream: fta::WakeAlarmsRequestStream,
432) -> (zx::Koid, fta::WakeAlarmsRequestStream) {
433    let (inner, is_terminated) = stream.into_inner();
434    let koid = inner.channel().as_channel().get_koid().expect("infallible");
435    let stream = fta::WakeAlarmsRequestStream::from_inner(inner, is_terminated);
436    (koid, stream)
437}
438
439/// Serves a single Wake API client.
440///
441/// This function processes incoming requests from a `fta::WakeAlarmsRequestStream`,
442/// handling each request by calling `handle_request`. It continues to process
443/// requests until the stream is exhausted.
444///
445/// # Args
446/// - `timer_loop`: A reference-counted pointer to the `Loop` that manages timers.
447/// - `requests`: The stream of incoming `fta::WakeAlarmsRequest` from a client.
448pub async fn serve(timer_loop: Rc<Loop>, requests: fta::WakeAlarmsRequestStream) {
449    let timer_loop = timer_loop.clone();
450    let timer_loop_send = || timer_loop.get_sender();
451    let (conn_id, mut requests) = get_stream_koid(requests);
452    let mut request_count = 0;
453    debug!("alarms::serve: opened connection: {:?}", conn_id);
454    while let Some(maybe_request) = requests.next().await {
455        request_count += 1;
456        debug!("alarms::serve: conn_id: {:?} incoming request: {}", conn_id, request_count);
457        match maybe_request {
458            Ok(request) => {
459                // Should return quickly.
460                handle_request(conn_id, timer_loop_send(), request).await;
461            }
462            Err(e) => {
463                warn!("alarms::serve: error in request: {:?}", e);
464            }
465        }
466        debug!("alarms::serve: conn_id: {:?} done request: {}", conn_id, request_count);
467    }
468    // Check if connection closure was intentional. It is way too easy to close
469    // a FIDL connection inadvertently if doing non-mainstream things with FIDL.
470    warn!("alarms::serve: CLOSED CONNECTION: conn_id: {:?}", conn_id);
471}
472
473async fn handle_cancel(alarm_id: String, conn_id: zx::Koid, cmd: &mut mpsc::Sender<Cmd>) {
474    let done = zx::Event::create();
475    let timer_id = timers::Id::new(alarm_id.clone(), conn_id);
476    if let Err(e) = cmd.send(Cmd::StopById { timer_id, done: clone_handle(&done) }).await {
477        warn!("handle_request: error while trying to cancel: {}: {:?}", alarm_id, e);
478    }
479    wait_signaled(&done).await;
480}
481
482/// Processes a single Wake API request from a single client.
483/// This function is expected to return quickly.
484///
485/// # Args
486/// - `conn_id`: the unique identifier of the connection producing these requests.
487/// - `cmd`: the outbound queue of commands to deliver to the timer manager.
488/// - `request`: a single inbound Wake FIDL API request.
489async fn handle_request(
490    conn_id: zx::Koid,
491    mut cmd: mpsc::Sender<Cmd>,
492    request: fta::WakeAlarmsRequest,
493) {
494    match request {
495        fta::WakeAlarmsRequest::SetAndWait { deadline, mode, alarm_id, responder } => {
            // The responder is consumed by either the happy path or the error path, but not
            // both. Because the responder does not implement Default, wrapping it as below is
            // a way to send it in two mutually exclusive directions. Each direction unwraps
            // it once the responder makes it to the other side.
500            //
501            // Rc required because of sharing a noncopyable struct; RefCell required because
502            // borrow_mut() is needed to move out; and Option is required so we can
503            // use take() to replace the struct with None so it does not need to leave
504            // a Default in its place.
505            let responder = Rc::new(RefCell::new(Some(responder)));
506
507            // Alarm is not scheduled yet!
508            debug!(
509                "handle_request: scheduling alarm_id: \"{}\"\n\tconn_id: {:?}\n\tdeadline: {}",
510                alarm_id,
511                conn_id,
512                format_timer(deadline.into())
513            );
514            // Expected to return quickly.
515            let deadline = timers::Deadline::Boot(deadline.into());
516            if let Err(e) = log_long_op!(cmd.send(Cmd::Start {
517                conn_id,
518                deadline,
519                mode: Some(mode),
520                alarm_id: alarm_id.clone(),
521                responder: responder.clone(),
522            })) {
523                warn!("handle_request: error while trying to schedule `{}`: {:?}", alarm_id, e);
524                responder
525                    .borrow_mut()
526                    .take()
527                    .expect("always present if call fails")
528                    .send(Err(fta::WakeAlarmsError::Internal))
529                    .unwrap();
530            }
531        }
532        fta::WakeAlarmsRequest::SetAndWaitUtc { deadline, mode, alarm_id, responder } => {
533            // Quickly get rid of the custom wake alarms deadline type.
534            let deadline =
535                timers::Deadline::Utc(fxr::UtcInstant::from_nanos(deadline.timestamp_utc));
536
537            // The rest of this match branch is the same as for `SetAndWait`. However, the handling
538            // is for now simple enough that we don't need to explore factoring common actions out.
539            let responder = Rc::new(RefCell::new(Some(responder)));
540            debug!(
541                "handle_request: scheduling alarm_id UTC: \"{alarm_id}\"\n\tconn_id: {conn_id:?}\n\tdeadline: {deadline}",
542            );
543
544            if let Err(e) = log_long_op!(cmd.send(Cmd::Start {
545                conn_id,
546                deadline,
547                mode: Some(mode),
548                alarm_id: alarm_id.clone(),
549                responder: responder.clone(),
550            })) {
551                warn!("handle_request: error while trying to schedule `{}`: {:?}", alarm_id, e);
552                responder
553                    .borrow_mut()
554                    .take()
555                    .expect("always present if call fails")
556                    .send(Err(fta::WakeAlarmsError::Internal))
557                    .unwrap();
558            }
559        }
560        fta::WakeAlarmsRequest::Cancel { alarm_id, .. } => {
561            // TODO: b/383062441 - make this into an async task so that we wait
562            // less to schedule the next alarm.
563            log_long_op!(handle_cancel(alarm_id, conn_id, &mut cmd));
564        }
565        fta::WakeAlarmsRequest::Set { notifier, deadline, mode, alarm_id, responder } => {
566            // Alarm is not scheduled yet!
567            debug!(
568                "handle_request: scheduling alarm_id: \"{alarm_id}\"\n\tconn_id: {conn_id:?}\n\tdeadline: {}",
569                format_timer(deadline.into())
570            );
571            // Expected to return quickly.
572            if let Err(e) = log_long_op!(cmd.send(Cmd::Start {
573                conn_id,
574                deadline: timers::Deadline::Boot(deadline.into()),
575                mode: Some(mode),
576                alarm_id: alarm_id.clone(),
577                responder: Rc::new(RefCell::new(Some(notifier))),
578            })) {
579                warn!("handle_request: error while trying to schedule `{}`: {:?}", alarm_id, e);
580                responder.send(Err(fta::WakeAlarmsError::Internal)).unwrap();
581            } else {
582                // Successfully scheduled the alarm.
583                responder.send(Ok(())).unwrap();
584            }
585        }
586        fta::WakeAlarmsRequest::_UnknownMethod { .. } => {}
587    };
588}
589
590/// Represents a single alarm event processing loop.
591///
592/// One instance is created per each alarm-capable low-level device. The `Loop`
593/// is responsible for managing the lifecycle of wake alarms, including their
594/// creation, scheduling, and cancellation. It interacts with the underlying
595/// hardware timer through a `TimerOps` trait object.
596pub struct Loop {
    // Handed to any clients that need to send commands to the timer loop,
    // via [Loop::get_sender].
599    snd: mpsc::Sender<Cmd>,
600}
601
602impl Loop {
603    /// Creates a new instance of `Loop`.
604    ///
605    /// This function initializes a new `Loop` with a connection to a low-level
606    /// hardware timer device. It spawns two background tasks: one for the main
607    /// timer event loop and another for monitoring UTC clock changes.
608    ///
609    /// # Args
610    /// - `scope`: The `fasync::ScopeHandle` to spawn background tasks in.
611    /// - `device_proxy`: A `ffhh::DeviceProxy` for communicating with the hardware timer.
612    /// - `inspect`: A `finspect::Node` for recording diagnostics.
613    /// - `utc_clock`: A `fxr::UtcClock` for tracking UTC time.
614    ///
615    /// # Returns
616    /// A new instance of `Loop`.
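    ///
    /// # Example
    ///
    /// A minimal construction sketch; `scope`, `proxy`, `inspect_node`, and
    /// `utc_clock` are assumed to have been obtained elsewhere:
    ///
    /// ```ignore
    /// let timer_loop = Rc::new(alarms::Loop::new(scope, proxy, inspect_node, utc_clock));
    /// ```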
617    pub fn new(
618        scope: fasync::ScopeHandle,
619        device_proxy: ffhh::DeviceProxy,
620        inspect: finspect::Node,
621        utc_clock: fxr::UtcClock,
622    ) -> Self {
623        let hw_device_timer_ops = HardwareTimerOps::new(device_proxy);
624        Loop::new_internal(scope, hw_device_timer_ops, inspect, utc_clock)
625    }
626
627    /// Creates a new instance of `Loop` with emulated wake alarms.
628    ///
629    /// This function is similar to `new`, but it uses an emulated timer instead
630    /// of a real hardware timer. This is useful for testing environments where
631    /// a hardware timer may not be available.
632    ///
633    /// # Args
634    /// - `scope`: The `fasync::ScopeHandle` to spawn background tasks in.
635    /// - `inspect`: A `finspect::Node` for recording diagnostics.
636    /// - `utc_clock`: A `fxr::UtcClock` for tracking UTC time.
637    ///
638    /// # Returns
639    /// A new instance of `Loop` with an emulated timer.
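    ///
    /// # Example
    ///
    /// A sketch of use in tests, where no hardware timer device is available:
    ///
    /// ```ignore
    /// let timer_loop = Rc::new(alarms::Loop::new_emulated(scope, inspect_node, utc_clock));
    /// ```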
640    pub fn new_emulated(
641        scope: fasync::ScopeHandle,
642        inspect: finspect::Node,
643        utc_clock: fxr::UtcClock,
644    ) -> Self {
645        let timer_ops = Box::new(EmulationTimerOps::new());
646        Loop::new_internal(scope, timer_ops, inspect, utc_clock)
647    }
648
649    fn new_internal(
650        scope: fasync::ScopeHandle,
651        timer_ops: Box<dyn TimerOps>,
652        inspect: finspect::Node,
653        utc_clock: fxr::UtcClock,
654    ) -> Self {
655        let utc_transform = Rc::new(RefCell::new(
656            utc_clock.get_details().expect("has UTC clock READ capability").reference_to_synthetic,
657        ));
658
659        let (snd, rcv) = mpsc::channel(CHANNEL_SIZE);
660        let loop_scope = scope.clone();
661
662        scope.spawn_local(wake_timer_loop(
663            loop_scope,
664            snd.clone(),
665            rcv,
666            timer_ops,
667            inspect,
668            utc_transform,
669        ));
670        scope.spawn_local(monitor_utc_clock_changes(utc_clock, snd.clone()));
671        Self { snd }
672    }
673
674    /// Gets a copy of a channel through which async commands may be sent to
675    /// the [Loop].
676    fn get_sender(&self) -> mpsc::Sender<Cmd> {
677        self.snd.clone()
678    }
679}
680
// Forwards the clock transform of an updated UTC clock into the alarm manager, so that
// the boot-time deadlines of alarms set on the UTC timeline can be corrected.
683async fn monitor_utc_clock_changes(utc_clock: fxr::UtcClock, mut cmd: mpsc::Sender<Cmd>) {
684    let koid = utc_clock.as_handle_ref().get_koid();
685    log::info!("monitor_utc_clock_changes: entry");
686    loop {
687        // CLOCK_UPDATED signal is self-clearing.
688        fasync::OnSignals::new(utc_clock.as_handle_ref(), zx::Signals::CLOCK_UPDATED)
689            .await
690            .expect("UTC clock is readable");
691
692        let transform =
693            utc_clock.get_details().expect("UTC clock details are readable").reference_to_synthetic;
694        log::debug!("Received a UTC update: koid={koid:?}: {transform:?}");
695        if let Err(err) = cmd.send(Cmd::UtcUpdated { transform }).await {
696            // This is OK in tests.
697            log::warn!("monitor_utc_clock_changes: exit: {err:?}");
698            break;
699        }
700    }
701}
702
703/// Clones a handle infallibly with `zx::Rights::SAME_RIGHTS`.
704///
705/// This function duplicates a handle, preserving its rights. It will panic if
706/// the handle duplication fails, which is not expected to happen under normal
707/// circumstances.
708///
709/// # Args
710/// - `handle`: A reference to a handle-based object to be cloned.
711///
712/// # Returns
713/// A new handle with the same rights as the original.
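///
/// # Example
///
/// A short sketch of duplicating an event, as done when signaling command
/// completion elsewhere in this module:
///
/// ```ignore
/// let done = zx::Event::create();
/// let done_copy = clone_handle(&done);
/// ```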
714pub fn clone_handle<H: HandleBased>(handle: &H) -> H {
715    handle.duplicate_handle(zx::Rights::SAME_RIGHTS).expect("infallible")
716}
717
718async fn wait_signaled<H: HandleBased>(handle: &H) {
719    fasync::OnSignals::new(handle, zx::Signals::EVENT_SIGNALED).await.expect("infallible");
720}
721
722pub(crate) fn signal<H: HandleBased>(handle: &H) {
723    handle.signal_handle(zx::Signals::NONE, zx::Signals::EVENT_SIGNALED).expect("infallible");
724}
725
726/// A [TimerDuration] represents a duration of time that can be expressed by
727/// a discrete timer register.
728///
729/// This is a low-level representation of time duration, used in interaction with
730/// hardware devices. It is therefore necessarily discretized, with adaptive
731/// resolution, depending on the physical characteristics of the underlying
732/// hardware timer that it models.
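///
/// For example, 10 ticks at a 5ms resolution models a 50ms duration (its
/// `Display` form is `10x5ms`):
///
/// ```ignore
/// let d = TimerDuration::new(zx::BootDuration::from_millis(5), 10);
/// assert_eq!(d.duration(), zx::BootDuration::from_millis(50));
/// ```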
733#[derive(Debug, Clone, Copy)]
734struct TimerDuration {
735    // The resolution of each one of the `ticks` below.
736    resolution: zx::BootDuration,
737    // The number of ticks that encodes time duration. Each "tick" represents
738    // one unit of `resolution` above.
739    ticks: u64,
740}
741
742/// This and the comparison traits below are used to allow TimerDuration
743/// calculations in a compact form.
744impl Eq for TimerDuration {}
745
746impl std::cmp::PartialOrd for TimerDuration {
747    fn partial_cmp(&self, other: &TimerDuration) -> Option<std::cmp::Ordering> {
748        Some(self.cmp(other))
749    }
750}
751
752impl std::cmp::PartialEq for TimerDuration {
753    fn eq(&self, other: &Self) -> bool {
754        self.cmp(other) == std::cmp::Ordering::Equal
755    }
756}
757
758impl std::cmp::Ord for TimerDuration {
759    /// Two [TimerDuration]s compare equal if they model exactly the same duration of time,
760    /// no matter the resolutions.
761    fn cmp(&self, other: &TimerDuration) -> std::cmp::Ordering {
762        let self_ticks_128: i128 = self.ticks as i128;
763        let self_resolution: i128 = self.resolution_as_nanos() as i128;
764        let self_nanos = self_resolution * self_ticks_128;
765
766        let other_ticks_128: i128 = other.ticks as i128;
767        let other_resolution: i128 = other.resolution_as_nanos() as i128;
768        let other_nanos = other_resolution * other_ticks_128;
769
770        self_nanos.cmp(&other_nanos)
771    }
772}
773
774impl std::fmt::Display for TimerDuration {
    /// The human-readable form exposes both the tick count and the resolution,
    /// in the format `<ticks>x<resolution>`, for example `10x5ms`.
778    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
779        let ticks = self.ticks;
780        let resolution = self.resolution();
781        // Example: 10x1ms
782        write!(f, "{}x{}", ticks, format_duration(resolution),)
783    }
784}
785
786impl TimerDuration {
787    /// The maximum representable TimerDuration that we allow.
788    fn max() -> Self {
789        TimerDuration::new(zx::BootDuration::from_nanos(1), *I64_MAX_AS_U64)
790    }
791
792    /// The zero [TimerDuration].
793    fn zero() -> Self {
794        TimerDuration::new(zx::BootDuration::from_nanos(1), 0)
795    }
796
797    /// Creates a new timer duration with the given parameters.
798    fn new(resolution: zx::BootDuration, ticks: u64) -> Self {
799        Self { resolution, ticks }
800    }
801
802    /// Creates a new timer duration using the resolution from `res_source` and
803    /// a specified number of ticks.
804    fn new_with_resolution(res_source: &TimerDuration, ticks: u64) -> Self {
805        Self::new(res_source.resolution, ticks)
806    }
807
808    /// Returns the time duration represented by this TimerDuration.
809    ///
810    /// Due to the way duration is expressed, the same time duration
811    /// can be represented in multiple ways.
812    fn duration(&self) -> zx::BootDuration {
813        let duration_as_nanos = self.resolution_as_nanos() * self.ticks;
814        let clamp_duration = std::cmp::min(*I32_MAX_AS_U64, duration_as_nanos);
815        zx::BootDuration::from_nanos(clamp_duration.try_into().expect("result was clamped"))
816    }
817
818    /// The resolution of this TimerDuration
819    fn resolution(&self) -> zx::BootDuration {
820        self.resolution
821    }
822
823    fn resolution_as_nanos(&self) -> u64 {
824        self.resolution().into_nanos().try_into().expect("resolution is never negative")
825    }
826
827    /// The number of ticks of this [TimerDuration].
828    fn ticks(&self) -> u64 {
829        self.ticks
830    }
831}
832
833impl From<zx::BootDuration> for TimerDuration {
834    fn from(d: zx::BootDuration) -> TimerDuration {
835        let nanos = d.into_nanos();
836        assert!(nanos >= 0);
837        let nanos_u64 = nanos.try_into().expect("guarded by assert");
838        TimerDuration::new(zx::BootDuration::from_nanos(1), nanos_u64)
839    }
840}
841
842impl std::ops::Div for TimerDuration {
843    type Output = u64;
844    fn div(self, rhs: Self) -> Self::Output {
845        let self_nanos = self.resolution_as_nanos() * self.ticks;
846        let rhs_nanos = rhs.resolution_as_nanos() * rhs.ticks;
847        self_nanos / rhs_nanos
848    }
849}
850
851impl std::ops::Mul<u64> for TimerDuration {
852    type Output = Self;
853    fn mul(self, rhs: u64) -> Self::Output {
854        Self::new(self.resolution, self.ticks * rhs)
855    }
856}
857
858/// Contains the configuration of a specific timer.
859#[derive(Debug)]
860pub(crate) struct TimerConfig {
    /// The resolutions supported by this timer. Each entry is one possible
    /// duration for one timer "tick".  The resolution is picked when a timer
    /// request is sent.
864    ///
865    /// The resolutions MUST be sorted from finest (index 0) to coarsest.
866    ///
867    /// There MUST be at least one resolution.
868    resolutions: Vec<zx::BootDuration>,
869    /// The maximum count of "ticks" that the timer supports. The timer usually
870    /// has a register that counts up or down based on a clock signal with
871    /// the period specified by `resolutions`.  This is the maximum value that
872    /// the counter can count to without overflowing.
873    max_ticks: u64,
874    /// The stable ID of the timer with the above configuration.
875    id: u64,
876}
877
878impl TimerConfig {
879    /// Creates a new timer config with supported timer resolutions and the max
880    /// ticks value for the timer's counter.
881    fn new_from_data(timer_id: u64, resolutions: &[zx::BootDuration], max_ticks: u64) -> Self {
882        debug!(
883            "TimerConfig: resolutions: {:?}, max_ticks: {}, timer_id: {}",
884            resolutions.iter().map(|r| format_duration(*r)).collect::<Vec<_>>(),
885            max_ticks,
886            timer_id
887        );
888        let resolutions = resolutions.iter().map(|d| *d).collect::<Vec<zx::BootDuration>>();
889        TimerConfig { resolutions, max_ticks, id: timer_id }
890    }
891
892    fn new_empty() -> Self {
893        error!("TimerConfig::new_empty() called, this is not OK.");
894        TimerConfig { resolutions: vec![], max_ticks: 0, id: 0 }
895    }
896
    // Picks the most appropriate timer setting so that the timer fires as close
    // as possible to when `duration` expires.
899    //
900    // If duration is too far in the future for what the timer supports,
901    // return a smaller value, to allow the timer to be reprogrammed multiple
902    // times.
903    //
904    // If the available menu of resolutions is such that we can wake only after
905    // the intended deadline, begrudgingly return that option.
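    //
    // Worked example (illustrative values): with a single supported resolution
    // of 1ms and a large max_ticks, a requested duration of 2.4ms does not
    // divide evenly into 1ms ticks.  The candidates are 2 ticks (fires 0.4ms
    // early) and 3 ticks (fires 0.6ms late); the smaller slack wins, so the
    // result is `2x1ms`, and the early wake is handled by re-programming the
    // timer when it fires.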
906    fn pick_setting(&self, duration: zx::BootDuration) -> TimerDuration {
907        assert!(self.resolutions.len() > 0, "there must be at least one supported resolution");
908
909        // Driver does not support zero ticks, so we must accept the finest resolution duration
910        // instead.
911        if duration <= zx::BootDuration::ZERO {
912            return TimerDuration::new(self.resolutions[0], 1);
913        }
914
915        //  0         |-------------->|<---------------|
916        //  |---------+---------------+----------------+---->
917        //  |---------^               |                |
918        //  | best positive slack     |                |
919        //  |-------------------------^ duration       |
920        //  |------------------------------------------^ best negative slack.
921        let mut best_positive_slack = TimerDuration::zero();
922        let mut best_negative_slack = TimerDuration::max();
923
924        if self.max_ticks == 0 {
925            return TimerDuration::new(zx::BootDuration::from_millis(1), 0);
926        }
927        let duration_slack: TimerDuration = duration.into();
928
929        for res1 in self.resolutions.iter() {
930            let smallest_unit = TimerDuration::new(*res1, 1);
931            let max_tick_at_res = TimerDuration::new(*res1, self.max_ticks);
932
933            let smallest_slack_larger_than_duration = smallest_unit > duration_slack;
934            let largest_slack_smaller_than_duration = max_tick_at_res < duration_slack;
935
936            if smallest_slack_larger_than_duration {
937                if duration_slack == TimerDuration::zero() {
938                    best_negative_slack = TimerDuration::zero();
939                } else if smallest_unit < best_negative_slack {
940                    best_negative_slack = smallest_unit;
941                }
942            }
943            if largest_slack_smaller_than_duration {
944                if max_tick_at_res > best_positive_slack
945                    || best_positive_slack == TimerDuration::zero()
946                {
947                    best_positive_slack = max_tick_at_res;
948                }
949            }
950
951            // "Regular" case.
952            if !smallest_slack_larger_than_duration && !largest_slack_smaller_than_duration {
953                // Check whether duration divides evenly into the available slack options
954                // for this resolution.  If it does, then that is the slack we're looking for.
955                let q = duration_slack / smallest_unit;
956                let d = smallest_unit * q;
957                if d == duration_slack {
958                    // Exact match, we can return right now.
959                    return d;
960                } else {
961                    // Not an exact match, so q ticks is before, but q+1 is after.
962                    if d > best_positive_slack {
963                        best_positive_slack = TimerDuration::new_with_resolution(&smallest_unit, q);
964                    }
965                    let d_plus = TimerDuration::new_with_resolution(&smallest_unit, q + 1);
966                    if d_plus < best_negative_slack {
967                        best_negative_slack = d_plus;
968                    }
969                }
970            }
971        }
972
973        let p_slack = duration - best_positive_slack.duration();
974        let n_slack = best_negative_slack.duration() - duration;
975
976        // If the closest approximation is 0ns, then we can not advance time, so we reject it.
977        // Otherwise pick the smallest slack.  Note that when we pick the best positive slack,
978        // we will wake *before* the actual deadline.  In multi-resolution counters, this enables
979        // us to pick a finer count in the next go.
980        let ret = if p_slack < n_slack && best_positive_slack.duration().into_nanos() > 0 {
981            best_positive_slack
982        } else {
983            best_negative_slack
984        };
985        debug!("TimerConfig: picked slack: {} for duration: {}", ret, format_duration(duration));
986        assert!(
987            ret.duration().into_nanos() >= 0,
988            "ret: {}, p_slack: {}, n_slack: {}, orig.duration: {}\n\tbest_p_slack: {}\n\tbest_n_slack: {}\n\ttarget: {}\n\t 1: {} 2: {:?}, 3: {:?}",
989            ret,
990            format_duration(p_slack),
991            format_duration(n_slack),
992            format_duration(duration),
993            best_positive_slack,
994            best_negative_slack,
995            duration_slack,
996            p_slack != zx::BootDuration::ZERO,
997            p_slack,
998            zx::BootDuration::ZERO,
999        );
1000        ret
1001    }
1002}
1003
1004async fn get_timer_properties(hrtimer: &Box<dyn TimerOps>) -> TimerConfig {
1005    debug!("get_timer_properties: requesting timer properties.");
1006    hrtimer.get_timer_properties().await
1007}
1008
1009/// The state of a single hardware timer that we must bookkeep.
1010struct TimerState {
1011    // The task waiting for the proximate timer to expire.
1012    task: fasync::Task<()>,
1013    // The deadline that the above task is waiting for.
1014    deadline: fasync::BootInstant,
1015}
1016
/// The command loop for timer interaction.  All changes to the wake alarm device programming
/// come in the form of commands through `cmds`.
///
/// Args:
/// - `scope`: the async scope used to spawn sub-tasks.
/// - `snd`: the send end of `cmds` below; a clone is given to each spawned sub-task.
/// - `cmds`: the input queue of alarm related commands.
/// - `timer_proxy`: the [TimerOps] implementation used to program the underlying timer device.
/// - `inspect`: the inspect node to record loop info into.
/// - `utc_transform`: the shared boot-to-UTC clock transform, used to convert UTC deadlines
///   to boot deadlines.
1025async fn wake_timer_loop(
1026    scope: fasync::ScopeHandle,
1027    snd: mpsc::Sender<Cmd>,
1028    mut cmds: mpsc::Receiver<Cmd>,
1029    timer_proxy: Box<dyn TimerOps>,
1030    inspect: finspect::Node,
1031    utc_transform: Rc<RefCell<fxr::UtcClockTransform>>,
1032) {
1033    debug!("wake_timer_loop: started");
1034
1035    let mut timers = timers::Heap::new(utc_transform.clone());
1036    let timer_config = get_timer_properties(&timer_proxy).await;
1037
1038    // Keeps the currently executing HrTimer closure.  This is not read from, but
1039    // keeps the timer task active.
1040    #[allow(clippy::collection_is_never_read)]
1041    let mut hrtimer_status: Option<TimerState> = None;
1042
1043    // Initialize inspect properties. This must be done only once.
1044    //
1045    // Take note that these properties are updated when the `cmds` loop runs.
1046    // This means that repeated reads while no `cmds` activity occurs will return
1047    // old readings.  This is to ensure a consistent ability to replay the last
1048    // loop run if needed.
1049    let now_prop = inspect.create_int("now_ns", 0);
1050    let now_formatted_prop = inspect.create_string("now_formatted", "");
1051    let pending_timers_count_prop = inspect.create_uint("pending_timers_count", 0);
1052    let pending_timers_prop = inspect.create_string("pending_timers", "");
1053    let _deadline_histogram_prop = inspect.create_int_exponential_histogram(
1054        "requested_deadlines_ns",
1055        finspect::ExponentialHistogramParams {
1056            floor: 0,
1057            initial_step: zx::BootDuration::from_micros(1).into_nanos(),
1058            // Allows capturing deadlines up to dozens of days.
1059            step_multiplier: 10,
1060            buckets: 16,
1061        },
1062    );
1063    let slack_histogram_prop = inspect.create_int_exponential_histogram(
1064        "slack_ns",
1065        finspect::ExponentialHistogramParams {
1066            floor: 0,
1067            initial_step: zx::BootDuration::from_micros(1).into_nanos(),
1068            step_multiplier: 10,
1069            buckets: 16,
1070        },
1071    );
1072    let schedule_delay_prop = inspect.create_int_exponential_histogram(
1073        "schedule_delay_ns",
1074        finspect::ExponentialHistogramParams {
1075            floor: 0,
1076            initial_step: zx::BootDuration::from_micros(1).into_nanos(),
1077            step_multiplier: 10,
1078            buckets: 16,
1079        },
1080    );
1081    let boot_deadlines_count_prop = inspect.create_uint("boot_deadlines_count", 0);
1082    let utc_deadlines_count_prop = inspect.create_uint("utc_deadlines_count", 0);
1083    // Internals of what was programmed into the wake alarms hardware.
1084    let hw_node = inspect.create_child("hardware");
1085    let current_hw_deadline_prop = hw_node.create_string("current_deadline", "");
1086    let remaining_until_alarm_prop = hw_node.create_string("remaining_until_alarm", "");
1087
1088    // Debug nodes for b/454085350.
1089    let debug_node = inspect.create_child("debug_node");
1090    let start_notify_setup_count = debug_node.create_int("start_notify_setup", 0);
1091    let start_count = debug_node.create_int("start_count", 0);
1092    let responder_count = debug_node.create_int("responder_count", 0);
1093    let stop_count = debug_node.create_int("stop", 0);
1094    let stop_responder_count = debug_node.create_int("stop_responder", 0);
1095    let stop_hrtimer_count = debug_node.create_int("stop_hrtimer", 0);
1096    let schedule_hrtimer_count = debug_node.create_int("schedule_hrtimer", 0);
1097    let alarm_count = debug_node.create_int("alarm", 0);
1098    let alarm_fidl_count = debug_node.create_int("alarm_fidl", 0);
1099    let alarm_driver_count = debug_node.create_int("alarm_driver", 0);
1100    let utc_update_count = debug_node.create_int("utc_update", 0);
1101    let status_count = debug_node.create_int("status", 0);
1102    let loop_count = debug_node.create_int("loop_count", 0);
1103
1104    let hrtimer_node = debug_node.create_child("hrtimer");
1105
1106    const LRU_CACHE_CAPACITY: usize = 100;
1107    let mut error_cache = lru_cache::LruCache::new(LRU_CACHE_CAPACITY);
1108
1109    while let Some(cmd) = cmds.next().await {
1110        let _i = ScopedInc::new(&loop_count);
1111        trace::duration!(c"alarms", c"Cmd");
1112        // Use a consistent notion of "now" across commands.
1113        let now = fasync::BootInstant::now();
1114        now_prop.set(now.into_nanos());
1115        trace::instant!(c"alarms", c"wake_timer_loop", trace::Scope::Process, "now" => now.into_nanos());
1116        match cmd {
1117            Cmd::Start { conn_id, deadline, mode, alarm_id, responder } => {
1118                let _i = ScopedInc::new(&start_count);
1119                trace::duration!(c"alarms", c"Cmd::Start");
1120                fuchsia_trace::flow_step!(
1121                    c"alarms",
1122                    c"hrtimer_lifecycle",
1123                    timers::get_trace_id(&alarm_id)
1124                );
1125                // NOTE: hold keep_alive until all work is done.
1126                debug!(
1127                    "wake_timer_loop: START alarm_id: \"{}\", conn_id: {:?}\n\tdeadline: {}\n\tnow:      {}",
1128                    alarm_id,
1129                    conn_id,
1130                    deadline,
1131                    format_timer(now.into()),
1132                );
1133
1134                defer! {
1135                    let _i = ScopedInc::new(&start_notify_setup_count);
1136                    // This is the only option that requires further action.
1137                    if let Some(mode) = mode {
1138                        if let fta::SetMode::NotifySetupDone(setup_done) = mode {
1139                            // Must signal once the setup is completed.
1140                            signal(&setup_done);
1141                            debug!("wake_timer_loop: START: setup_done signaled");
1142                        };
1143                    }
1144                }
1145                let deadline_boot = deadline.as_boot(&*utc_transform.borrow());
1146
1147                // TODO: b/444236931: re-enable.
1148                //// Bookkeeping, record the incidence of deadline types.
1149                //deadline_histogram_prop.insert((deadline_boot - now).into_nanos());
1150                match deadline {
1151                    timers::Deadline::Boot(_) => boot_deadlines_count_prop.add(1),
1152                    timers::Deadline::Utc(_) => utc_deadlines_count_prop.add(1),
1153                };
1154
1155                if timers::Heap::expired(now, deadline_boot) {
1156                    trace::duration!(c"alarms", c"Cmd::Start:immediate");
1157                    fuchsia_trace::flow_step!(
1158                        c"alarms",
1159                        c"hrtimer_lifecycle",
1160                        timers::get_trace_id(&alarm_id)
1161                    );
                    // A timer set to now or to a past instant expires right away.
1163                    let (_lease, keep_alive) = zx::EventPair::create();
1164                    debug!(
1165                        "[{}] wake_timer_loop: bogus lease {:?}",
1166                        line!(),
1167                        &keep_alive.get_koid().unwrap()
1168                    );
1169
1170                    {
1171                        let _i1 = ScopedInc::new(&responder_count);
1172                        if let Err(e) = responder
1173                            .send(&alarm_id, Ok(keep_alive))
1174                            .expect("responder is always present")
1175                        {
1176                            error!(
1177                                "wake_timer_loop: conn_id: {conn_id:?}, alarm: {alarm_id}: could not notify, dropping: {e}",
1178                            );
1179                        } else {
1180                            debug!(
1181                                "wake_timer_loop: conn_id: {conn_id:?}, alarm: {alarm_id}: EXPIRED IMMEDIATELY\n\tdeadline({}) <= now({})\n\tfull deadline: {}",
1182                                format_timer(deadline_boot.into()),
1183                                format_timer(now.into()),
1184                                deadline,
1185                            )
1186                        }
1187                    }
1188                } else {
1189                    trace::duration!(c"alarms", c"Cmd::Start:regular");
1190                    fuchsia_trace::flow_step!(
1191                        c"alarms",
1192                        c"hrtimer_lifecycle",
1193                        timers::get_trace_id(&alarm_id)
1194                    );
1195                    // A timer scheduled for the future gets inserted into the timer heap.
1196                    let was_empty = timers.is_empty();
1197
1198                    let deadline_before = timers.peek_deadline_as_boot();
1199                    let node = match deadline {
1200                        timers::Deadline::Boot(_) => {
1201                            timers.new_node_boot(deadline_boot, alarm_id, conn_id, responder)
1202                        }
1203                        timers::Deadline::Utc(d) => {
1204                            timers.new_node_utc(d, alarm_id, conn_id, responder)
1205                        }
1206                    };
1207                    timers.push(node);
1208                    let deadline_after = timers.peek_deadline_as_boot();
1209
1210                    let deadline_changed = is_deadline_changed(deadline_before, deadline_after);
1211                    let needs_cancel = !was_empty && deadline_changed;
1212                    let needs_reschedule = was_empty || deadline_changed;
1213
1214                    if needs_reschedule {
1215                        // Always schedule the proximate deadline.
1216                        let schedulable_deadline = deadline_after.unwrap_or(deadline_boot);
1217                        if needs_cancel {
1218                            log_long_op!(stop_hrtimer(&timer_proxy, &timer_config));
1219                        }
1220                        hrtimer_status = Some(log_long_op!(schedule_hrtimer(
1221                            scope.clone(),
1222                            now,
1223                            &timer_proxy,
1224                            schedulable_deadline,
1225                            snd.clone(),
1226                            &timer_config,
1227                            &schedule_delay_prop,
1228                            &hrtimer_node,
1229                        )));
1230                    }
1231                }
1232            }
1233            Cmd::StopById { timer_id, done } => {
1234                let _i = ScopedInc::new(&stop_count);
1235                defer! {
1236                    signal(&done);
1237                }
1238                trace::duration!(c"alarms", c"Cmd::StopById", "alarm_id" => timer_id.alarm());
1239                fuchsia_trace::flow_step!(
1240                    c"alarms",
1241                    c"hrtimer_lifecycle",
1242                    timers::get_trace_id(&timer_id.alarm())
1243                );
1244                debug!("wake_timer_loop: STOP timer: {}", timer_id);
1245                let deadline_before = timers.peek_deadline_as_boot();
1246
1247                if let Some(timer_node) = timers.remove_by_id(&timer_id) {
1248                    let deadline_after = timers.peek_deadline_as_boot();
1249
1250                    {
1251                        let _i = ScopedInc::new(&stop_responder_count);
1252                        if let Some(res) = timer_node
1253                            .get_responder()
1254                            .send(timer_node.id().alarm(), Err(fta::WakeAlarmsError::Dropped))
1255                        {
1256                            // We must reply to the responder to keep the connection open.
1257                            res.expect("infallible");
1258                        }
1259                    }
1260                    if is_deadline_changed(deadline_before, deadline_after) {
1261                        let _i = ScopedInc::new(&stop_hrtimer_count);
1262                        log_long_op!(stop_hrtimer(&timer_proxy, &timer_config));
1263                    }
1264                    if let Some(deadline) = deadline_after {
1265                        let _i = ScopedInc::new(&schedule_hrtimer_count);
1266                        // Reschedule the hardware timer if the removed timer is the earliest one,
1267                        // and another one exists.
1268                        let new_timer_state = log_long_op!(schedule_hrtimer(
1269                            scope.clone(),
1270                            now,
1271                            &timer_proxy,
1272                            deadline,
1273                            snd.clone(),
1274                            &timer_config,
1275                            &schedule_delay_prop,
1276                            &hrtimer_node,
1277                        ));
1278                        let old_hrtimer_status = hrtimer_status.replace(new_timer_state);
1279                        if let Some(task) = old_hrtimer_status.map(|ev| ev.task) {
1280                            // Allow the task to complete. Since this task should have been
1281                            // canceled or completed already, this call should not block for
1282                            // a long time.
1283                            log_long_op!(task);
1284                        }
1285                    } else {
1286                        // No next timer, clean up the hrtimer status.
1287                        hrtimer_status = None;
1288                    }
1289                } else {
1290                    debug!("wake_timer_loop: STOP: no active timer to stop: {}", timer_id);
1291                }
1292            }
1293            Cmd::Alarm { expired_deadline, keep_alive } => {
1294                let _i = ScopedInc::new(&alarm_count);
1295
1296                trace::duration!(c"alarms", c"Cmd::Alarm");
1297                // Expire all eligible timers based on "now".  We may have woken
1298                // up earlier than the actual deadline; this happens, for example,
1299                // when the hardware timer cannot reach the actual deadline in one
1300                // programming and needs to be re-programmed.
1301                debug!(
1302                    "wake_timer_loop: ALARM!!! reached deadline: {}, wakey-wakey! {:?}",
1303                    format_timer(expired_deadline.into()),
1304                    keep_alive.get_koid().unwrap(),
1305                );
1306                let expired_count =
1307                    notify_all(&mut timers, &keep_alive, now, None, &slack_histogram_prop)
1308                        .expect("notification succeeds");
1309                if expired_count == 0 {
1310                    // This could be a resolution switch or a straggler notification.
1311                    // Either way, the hardware timer is still ticking; cancel it.
1312                    debug!("wake_timer_loop: no expired alarms, reset hrtimer state");
1313                    log_long_op!(stop_hrtimer(&timer_proxy, &timer_config));
1314                }
1315                // If another timer is pending, reschedule the hardware timer now.
1316                hrtimer_status = match timers.peek_deadline_as_boot() {
1317                    None => None,
1318                    Some(deadline) => Some(log_long_op!(schedule_hrtimer(
1319                        scope.clone(),
1320                        now,
1321                        &timer_proxy,
1322                        deadline,
1323                        snd.clone(),
1324                        &timer_config,
1325                        &schedule_delay_prop,
1326                        &hrtimer_node,
1327                    ))),
1328                }
1329            }
1330            Cmd::AlarmFidlError { expired_deadline, error } => {
1331                let _i = ScopedInc::new(&alarm_fidl_count);
1332
1333                trace::duration!(c"alarms", c"Cmd::AlarmFidlError");
1334                // We do not have a wake lease, so the system may sleep before
1335                // we get to schedule a new timer. We have no way to avoid it
1336                // today.
1337                let error_string = format!("{}", error);
1338                if !error_cache.contains_key(&error_string) {
1339                    warn!(
1340                        "wake_timer_loop: FIDL error: {}, deadline: {}, now: {}",
1341                        error,
1342                        format_timer(expired_deadline.into()),
1343                        format_timer(now.into()),
1344                    );
1345                    error_cache.insert(error_string, ());
1346                }
1347                // Manufacture a fake lease to make the code below work.
1348                // Maybe use Option instead?
1349                let (_dummy_lease, peer) = zx::EventPair::create();
1350                debug!(
1351                    "bogus lease: {:?} fidl error [{}:{}]",
1352                    &peer.get_koid().unwrap(),
1353                    file!(),
1354                    line!()
1355                );
1356                notify_all(
1357                    &mut timers,
1358                    &peer,
1359                    now,
1360                    Some(TimerOpsError::Fidl(error)),
1361                    &slack_histogram_prop,
1362                )
1363                .expect("notification succeeds");
1364                hrtimer_status = match timers.peek_deadline_as_boot() {
1365                    None => None, // No remaining timers, nothing to schedule.
1366                    Some(deadline) => Some(log_long_op!(schedule_hrtimer(
1367                        scope.clone(),
1368                        now,
1369                        &timer_proxy,
1370                        deadline,
1371                        snd.clone(),
1372                        &timer_config,
1373                        &schedule_delay_prop,
1374                        &hrtimer_node,
1375                    ))),
1376                }
1377            }
1378            Cmd::AlarmDriverError {
1379                expired_deadline,
1380                error,
1381                timer_config_id,
1382                resolution_nanos,
1383                ticks,
1384            } => {
1385                let _i = ScopedInc::new(&alarm_driver_count);
1386
1387                trace::duration!(c"alarms", c"Cmd::AlarmDriverError");
1388                let (_dummy_lease, peer) = zx::EventPair::create();
1389                debug!(
1390                    "bogus lease: {:?} driver error. [{}:{}]",
1391                    &peer.get_koid().unwrap(),
1392                    file!(),
1393                    line!()
1394                );
1395                notify_all(
1396                    &mut timers,
1397                    &peer,
1398                    now,
1399                    Some(TimerOpsError::Driver(error)),
1400                    &slack_histogram_prop,
1401                )
1402                .expect("notification succeeds");
1403                match error {
1404                    fidl_fuchsia_hardware_hrtimer::DriverError::Canceled => {
1405                        // Nothing to do here; cancellation is handled in the Stop code.
1406                        debug!(
1407                            "wake_timer_loop: CANCELED timer at deadline: {}",
1408                            format_timer(expired_deadline.into())
1409                        );
1410                    }
1411                    _ => {
1412                        error!(
1413                            "wake_timer_loop: DRIVER SAYS: {:?}, deadline: {}, now: {}\n\ttimer_id={}\n\tresolution={}\n\tticks={}",
1414                            error,
1415                            format_timer(expired_deadline.into()),
1416                            format_timer(now.into()),
1417                            timer_config_id,
1418                            resolution_nanos,
1419                            ticks,
1420                        );
1421                        // We do not have a wake lease, so the system may sleep before
1422                        // we get to schedule a new timer. We have no way to avoid it
1423                        // today.
1424                        hrtimer_status = match timers.peek_deadline_as_boot() {
1425                            None => None,
1426                            Some(deadline) => Some(log_long_op!(schedule_hrtimer(
1427                                scope.clone(),
1428                                now,
1429                                &timer_proxy,
1430                                deadline,
1431                                snd.clone(),
1432                                &timer_config,
1433                                &schedule_delay_prop,
1434                                &hrtimer_node,
1435                            ))),
1436                        }
1437                    }
1438                }
1439            }
1440            Cmd::UtcUpdated { transform } => {
1441                let _i = ScopedInc::new(&utc_update_count);
1442
1443                trace::duration!(c"alarms", c"Cmd::UtcUpdated");
1444                debug!("wake_timer_loop: applying new clock transform: {transform:?}");
1445
1446                // Assigning to this shared reference updates the deadlines of all
1447                // UTC timers.
1448                *utc_transform.borrow_mut() = transform;
1449
1450                // Reschedule the hardware timer with the now-current deadline if there is an
1451                // active timer.
1452                if hrtimer_status.is_some() {
1453                    log_long_op!(stop_hrtimer(&timer_proxy, &timer_config));
1454                    // Should we request a wake lock here?
1455                    hrtimer_status = match timers.peek_deadline_as_boot() {
1456                        None => None,
1457                        Some(deadline) => Some(log_long_op!(schedule_hrtimer(
1458                            scope.clone(),
1459                            now,
1460                            &timer_proxy,
1461                            deadline,
1462                            snd.clone(),
1463                            &timer_config,
1464                            &schedule_delay_prop,
1465                            &hrtimer_node,
1466                        ))),
1467                    }
1468                }
1469            }
1470        }
1471
1472        {
1473            let _i = ScopedInc::new(&status_count);
1474
1475            // Print and record diagnostics after each iteration, and record the
1476            // duration for performance awareness.  Note that iterations happen
1477            // only occasionally, so these stats can remain unchanged for a long
1478            // time.
1479            trace::duration!(c"timekeeper", c"inspect");
1480            let now_formatted = format_timer(now.into());
1481            debug!("wake_timer_loop: now:                             {}", &now_formatted);
1482            now_formatted_prop.set(&now_formatted);
1483
1484            let pending_timers_count: u64 =
1485                timers.timer_count().try_into().expect("always convertible");
1486            debug!("wake_timer_loop: currently pending timer count:   {}", pending_timers_count);
1487            pending_timers_count_prop.set(pending_timers_count);
1488
1489            let pending_timers = format!("{}", timers);
1490            debug!("wake_timer_loop: currently pending timers:        \n\t{}", &timers);
1491            pending_timers_prop.set(&pending_timers);
1492
1493            let current_deadline: String = hrtimer_status
1494                .as_ref()
1495                .map(|s| format!("{}", format_timer(s.deadline.into())))
1496                .unwrap_or_else(|| "(none)".into());
1497            debug!("wake_timer_loop: current hardware timer deadline: {:?}", current_deadline);
1498            current_hw_deadline_prop.set(&current_deadline);
1499
1500            let remaining_duration_until_alarm = hrtimer_status
1501                .as_ref()
1502                .map(|s| format!("{}", format_duration((s.deadline - now).into())))
1503                .unwrap_or_else(|| "(none)".into());
1504            debug!(
1505                "wake_timer_loop: remaining duration until alarm:  {}",
1506                &remaining_duration_until_alarm
1507            );
1508            remaining_until_alarm_prop.set(&remaining_duration_until_alarm);
1509            debug!("---");
1510        }
1511    }
1512
1513    // Production code should never see this loop exit; the wake alarm manager
1514    // should run forever.
1515    log::info!("wake_timer_loop: exiting. This is only correct in test code.");
1516}
1517
1518/// Schedules a wake alarm.
1519///
1520/// # Args:
1521///
1522/// - `scope`: used to spawn async tasks.
1523/// - `now`: the time instant to treat as the current time.
1524/// - `hrtimer`: the proxy for the hrtimer device driver.
1525/// - `deadline`: the time instant in the future at which the alarm should fire.
1526/// - `command_send`: the sender channel to use when the timer expires.
1527/// - `timer_config`: the hardware timer configuration, listing supported resolutions and
1528///   the maximum tick value.
1529/// - `schedule_delay_histogram`: inspect instrumentation.
1530/// - `debug_node`: used for keeping debug counters.
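///
/// # Example
///
/// A minimal usage sketch (not compiled): it mirrors the call sites in
/// `wake_timer_loop` above and assumes `timers`, `timer_proxy`, `snd`,
/// `timer_config`, `schedule_delay_prop`, and `hrtimer_node` are already in
/// scope there.
///
/// ```ignore
/// let deadline = timers.peek_deadline_as_boot().expect("at least one pending timer");
/// let new_timer_state = schedule_hrtimer(
///     scope.clone(),
///     fasync::BootInstant::now(),
///     &timer_proxy,
///     deadline,
///     snd.clone(),
///     &timer_config,
///     &schedule_delay_prop,
///     &hrtimer_node,
/// )
/// .await;
/// // The returned TimerState owns the in-flight hrtimer wait task and records
/// // the deadline that was programmed.
/// ```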
1531async fn schedule_hrtimer(
1532    scope: fasync::ScopeHandle,
1533    now: fasync::BootInstant,
1534    hrtimer: &Box<dyn TimerOps>,
1535    deadline: fasync::BootInstant,
1536    mut command_send: mpsc::Sender<Cmd>,
1537    timer_config: &TimerConfig,
1538    _schedule_delay_histogram: &finspect::IntExponentialHistogramProperty,
1539    debug_node: &finspect::Node,
1540) -> TimerState {
1541    let timeout = std::cmp::max(zx::BootDuration::ZERO, deadline - now);
1542    trace::duration!(c"alarms", c"schedule_hrtimer", "timeout" => timeout.into_nanos());
1543    // When signaled, the hrtimer has been scheduled.
1544    let hrtimer_scheduled = zx::Event::create();
1545
1546    let schedule_count = debug_node.create_int("schedule", 0);
1547    let hrtimer_wait_count = debug_node.create_int("hrtimer_wait", 0);
1548    let wait_signaled_count = debug_node.create_int("wait_signaled", 0);
1549
1550    let _sc = ScopedInc::new(&schedule_count);
1551
1552    debug!(
1553        "schedule_hrtimer:\n\tnow: {}\n\tdeadline: {}\n\ttimeout: {}",
1554        format_timer(now.into()),
1555        format_timer(deadline.into()),
1556        format_duration(timeout),
1557    );
1558
1559    let slack = timer_config.pick_setting(timeout);
1560    let resolution_nanos = slack.resolution.into_nanos();
1561    // The driver will reject "0" ticks, even though it probably shouldn't. See for details:
1562    // b/437177931.
1563    let useful_ticks = std::cmp::max(MIN_USEFUL_TICKS, slack.ticks());
1564
1565    trace::instant!(c"alarms", c"hrtimer:programmed",
1566        trace::Scope::Process,
1567        "resolution_ns" => resolution_nanos,
1568        "ticks" => useful_ticks
1569    );
1570    let timer_config_id = timer_config.id;
1571    let start_and_wait_fut = {
1572        let _sc = ScopedInc::new(&hrtimer_wait_count);
1573        hrtimer.start_and_wait(
1574            timer_config.id,
1575            &ffhh::Resolution::Duration(resolution_nanos),
1576            useful_ticks,
1577            clone_handle(&hrtimer_scheduled),
1578        )
1579    };
1580
1581    let hrtimer_scheduled_if_error = clone_handle(&hrtimer_scheduled);
1582    let hrtimer_task = scope.spawn_local(async move {
1583        debug!("hrtimer_task: waiting for hrtimer driver response");
1584        trace::instant!(c"alarms", c"hrtimer:started", trace::Scope::Process);
1585        let response = start_and_wait_fut.await;
1586        trace::instant!(c"alarms", c"hrtimer:response", trace::Scope::Process);
1587        match response {
1588            Err(TimerOpsError::Fidl(e)) => {
1589                defer! {
1590                    // Allow the wait on hrtimer_scheduled to proceed anyway.
1591                    signal(&hrtimer_scheduled_if_error);
1592                }
1593                trace::instant!(c"alarms", c"hrtimer:response:fidl_error", trace::Scope::Process);
1594                command_send
1595                    .start_send(Cmd::AlarmFidlError { expired_deadline: now, error: e })
1596                    .unwrap();
1597                // BAD: no way to keep alive.
1598            }
1599            Err(TimerOpsError::Driver(e)) => {
1600                defer! {
1601                    // This should be idempotent if the error occurs after
1602                    // the timer was scheduled.
1603                    signal(&hrtimer_scheduled_if_error);
1604                }
1605                let driver_error_str = format!("{:?}", e);
1606                trace::instant!(c"alarms", c"hrtimer:response:driver_error", trace::Scope::Process, "error" => &driver_error_str[..]);
1607                // This is very common. For example, a "timer canceled" event
1608                // will result in this code path being hit.
1609                debug!("schedule_hrtimer: hrtimer driver error: {:?}", e);
1610                command_send
1611                    .start_send(Cmd::AlarmDriverError {
1612                        expired_deadline: now,
1613                        error: e,
1614                        timer_config_id,
1615                        resolution_nanos,
1616                        ticks: useful_ticks,
1617                    })
1618                    .unwrap();
1619                // BAD: no way to keep alive.
1620            }
1621            Ok(keep_alive) => {
1622                trace::instant!(c"alarms", c"hrtimer:response:alarm", trace::Scope::Process);
1623                debug!("hrtimer: got alarm response: {:?}", keep_alive);
1624                // May trigger sooner than the deadline.
1625                command_send
1626                    .start_send(Cmd::Alarm { expired_deadline: deadline, keep_alive })
1627                    .unwrap();
1628            }
1629        }
1630        debug!("hrtimer_task: exiting task.");
1631        trace::instant!(c"alarms", c"hrtimer:task_exit", trace::Scope::Process);
1632    }).into();
1633    debug!("schedule_hrtimer: waiting for event to be signaled");
1634
1635    {
1636        let _i = ScopedInc::new(&wait_signaled_count);
1637        // We must wait here to ensure that the wake alarm has been scheduled.
1638        log_long_op!(wait_signaled(&hrtimer_scheduled));
1639    }
1640
1641    let now_after_signaled = fasync::BootInstant::now();
1642    let duration_until_scheduled: zx::BootDuration = (now_after_signaled - now).into();
1643    if duration_until_scheduled > zx::BootDuration::from_nanos(LONG_DELAY_NANOS) {
1644        trace::duration!(c"alarms", c"schedule_hrtimer:unusual_duration",
1645            "duration" => duration_until_scheduled.into_nanos());
1646        warn!(
1647            "unusual duration until hrtimer scheduled: {}",
1648            format_duration(duration_until_scheduled)
1649        );
1650    }
1651    // TODO: b/444236931: re-enable.
1652    //schedule_delay_histogram.insert(duration_until_scheduled.into_nanos());
1653    debug!("schedule_hrtimer: hrtimer wake alarm has been scheduled.");
1654    TimerState { task: hrtimer_task, deadline }
1655}
1656
1657/// Notify all `timers` that `reference_instant` has been reached.
1658///
1659/// The notified `timers` are removed from the list of timers to notify.
1660///
1661/// # Args:
1662/// - `timers`: the collection of currently available timers.
1663/// - `lease_prototype`: an EventPair used as a wake lease.
1664/// - `reference_instant`: the time instant used as a reference for alarm notification.
1665/// - `timer_ops_error`: if set, this is the error that happened while attempting to
1666///   schedule or trigger a timer in hardware.
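///
/// A minimal usage sketch (not compiled); it mirrors the call in `wake_timer_loop`
/// above, and assumes `timers`, `keep_alive`, `now`, and `slack_histogram_prop` are
/// in scope there.
///
/// ```ignore
/// // Expire and notify every timer whose deadline is at or before `now`.
/// // The returned count is the number of timers that were notified and removed.
/// let expired_count =
///     notify_all(&mut timers, &keep_alive, now, None, &slack_histogram_prop)
///         .expect("notification succeeds");
/// ```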
1667fn notify_all(
1668    timers: &mut timers::Heap,
1669    lease_prototype: &zx::EventPair,
1670    reference_instant: fasync::BootInstant,
1671    timer_ops_error: Option<TimerOpsError>,
1672    _unusual_slack_histogram: &finspect::IntExponentialHistogramProperty,
1673) -> Result<usize> {
1674    trace::duration!(c"alarms", c"notify_all");
1675    let now = fasync::BootInstant::now();
1676    let mut expired = 0;
1677    while let Some(timer_node) = timers.maybe_expire_earliest(reference_instant) {
1678        expired += 1;
1679        // How much later than requested the notification happened.
1680        let deadline = timer_node.get_boot_deadline();
1681        let alarm = timer_node.id().alarm();
1682        let alarm_id = alarm.to_string();
1683        trace::duration!(c"alarms", c"notify_all:notified", "alarm_id" => &*alarm_id);
1684        fuchsia_trace::flow_step!(c"alarms", c"hrtimer_lifecycle", timers::get_trace_id(&alarm_id));
1685        let conn_id = timer_node.id().conn.clone();
1686        let slack: zx::BootDuration = deadline - now;
1687        if slack < zx::BootDuration::from_nanos(-LONG_DELAY_NANOS) {
1688            trace::duration!(c"alarms", c"schedule_hrtimer:unusual_slack", "slack" => slack.into_nanos());
1689            // This alarm triggered noticeably later than it should have.
1690            warn!(
1691                "alarm id: {} had an unusually large slack: {}",
1692                alarm_id,
1693                format_duration(slack)
1694            );
1695        }
1696        if slack < zx::BootDuration::ZERO {
1697            // TODO: b/444236931: re-enable.
1698            //unusual_slack_histogram.insert(-slack.into_nanos());
1699        }
1700        if let Some(ref err) = timer_ops_error {
1701            // A canceled timer is still notified as a regular alarm; any other
1702            // error is forwarded to the responder instead.
1703            if !err.is_canceled() {
1704                timer_node.get_responder().send(alarm, Err(err.clone().into()));
1705                continue;
1706            }
1707        }
1708        debug!(
1709            concat!(
1710                "wake_alarm_loop: ALARM alarm_id: \"{}\"\n\tdeadline: {},\n\tconn_id: {:?},\n\t",
1711                "reference_instant: {},\n\tnow: {},\n\tslack: {}",
1712            ),
1713            alarm_id,
1714            format_timer(deadline.into()),
1715            conn_id,
1716            format_timer(reference_instant.into()),
1717            format_timer(now.into()),
1718            format_duration(slack),
1719        );
1720        let lease = clone_handle(lease_prototype);
1721        trace::instant!(c"alarms", c"notify", trace::Scope::Process, "alarm_id" => &alarm_id[..], "conn_id" => conn_id);
1722        if let Some(Err(e)) = timer_node.get_responder().send(alarm, Ok(lease)) {
1723            error!("could not signal responder: {:?}", e);
1724        }
1725        trace::instant!(c"alarms", c"notified", trace::Scope::Process);
1726    }
1727    trace::instant!(c"alarms", c"notify", trace::Scope::Process, "expired_count" => expired);
1728    debug!("notify_all: expired count: {}", expired);
1729    Ok(expired)
1730    // Note: no new hardware timer has been scheduled at this point.
1731}
1732
1733/// The hrtimer driver service directory.  hrtimer driver APIs appear as randomly
1734/// named files in this directory. They are expected to come and go.
1735const HRTIMER_DIRECTORY: &str = "/dev/class/hrtimer";
1736
1737/// Connects to the high resolution timer device driver.
1738///
1739/// This function watches the hrtimer device directory and connects to the first
1740/// available hrtimer device.
1741///
1742/// # Returns
1743/// A `Result` containing a `ffhh::DeviceProxy` on success, or an error if
1744/// the connection fails.
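///
/// # Example
///
/// A minimal sketch (not compiled); the error handling here is only an
/// illustration, not a requirement of this API:
///
/// ```ignore
/// let hrtimer_proxy: ffhh::DeviceProxy = connect_to_hrtimer_async()
///     .await
///     .expect("hrtimer device should be available");
/// ```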
1745pub async fn connect_to_hrtimer_async() -> Result<ffhh::DeviceProxy> {
1746    debug!("connect_to_hrtimer: trying directory: {}", HRTIMER_DIRECTORY);
1747    let dir =
1748        fuchsia_fs::directory::open_in_namespace(HRTIMER_DIRECTORY, fidl_fuchsia_io::PERM_READABLE)
1749            .with_context(|| format!("Opening {}", HRTIMER_DIRECTORY))?;
1750    let path = device_watcher::watch_for_files(&dir)
1751        .await
1752        .with_context(|| format!("Watching for files in {}", HRTIMER_DIRECTORY))?
1753        .try_next()
1754        .await
1755        .with_context(|| format!("Getting a file from {}", HRTIMER_DIRECTORY))?;
1756    let path = path.ok_or_else(|| anyhow!("Could not find {}", HRTIMER_DIRECTORY))?;
1757    let path = path
1758        .to_str()
1759        .ok_or_else(|| anyhow!("Could not find a valid str for {}", HRTIMER_DIRECTORY))?;
1760    connect_to_named_protocol_at_dir_root::<ffhh::DeviceMarker>(&dir, path)
1761        .context("Failed to connect to built-in service")
1762}
1763
1764#[cfg(test)]
1765mod tests {
1766    use super::*;
1767    use assert_matches::assert_matches;
1768    use diagnostics_assertions::{AnyProperty, assert_data_tree};
1769    use fuchsia_async::TestExecutor;
1770    use futures::select;
1771    use test_case::test_case;
1772    use test_util::{assert_gt, assert_lt};
1773
1774    fn fake_wake_lease() -> fidl_fuchsia_power_system::LeaseToken {
1775        let (_lease, peer) = zx::EventPair::create();
1776        peer
1777    }
1778
1779    #[test]
1780    fn timer_duration_no_overflow() {
1781        let duration1 = TimerDuration {
1782            resolution: zx::BootDuration::from_seconds(100_000_000),
1783            ticks: u64::MAX,
1784        };
1785        let duration2 = TimerDuration {
1786            resolution: zx::BootDuration::from_seconds(110_000_000),
1787            ticks: u64::MAX,
1788        };
1789        assert_eq!(duration1, duration1);
1790        assert_eq!(duration2, duration2);
1791
1792        assert_lt!(duration1, duration2);
1793        assert_gt!(duration2, duration1);
1794    }
1795
1796    #[test_case(
1797        TimerDuration::new(zx::BootDuration::from_nanos(1), 1),
1798        TimerDuration::new(zx::BootDuration::from_nanos(1), 1)
1799    )]
1800    #[test_case(
1801        TimerDuration::new(zx::BootDuration::from_nanos(1), 10),
1802        TimerDuration::new(zx::BootDuration::from_nanos(10), 1)
1803    )]
1804    #[test_case(
1805        TimerDuration::new(zx::BootDuration::from_nanos(10), 1),
1806        TimerDuration::new(zx::BootDuration::from_nanos(1), 10)
1807    )]
1808    #[test_case(
1809        TimerDuration::new(zx::BootDuration::from_micros(1), 1),
1810        TimerDuration::new(zx::BootDuration::from_nanos(1), 1000)
1811    )]
1812    fn test_slack_eq(one: TimerDuration, other: TimerDuration) {
1813        assert_eq!(one, other);
1814    }
1815
1816    #[test_case(
1817        TimerDuration::new(zx::BootDuration::from_nanos(1), 1),
1818        TimerDuration::new(zx::BootDuration::from_nanos(1), 2)
1819    )]
1820    #[test_case(
1821        TimerDuration::new(zx::BootDuration::from_nanos(1), 1),
1822        TimerDuration::new(zx::BootDuration::from_nanos(10), 1)
1823    )]
1824    fn test_slack_lt(one: TimerDuration, other: TimerDuration) {
1825        assert_lt!(one, other);
1826    }
1827
1828    #[test_case(
1829        TimerDuration::new(zx::BootDuration::from_nanos(1), 2),
1830        TimerDuration::new(zx::BootDuration::from_nanos(1), 1)
1831    )]
1832    #[test_case(
1833        TimerDuration::new(zx::BootDuration::from_nanos(10), 1),
1834        TimerDuration::new(zx::BootDuration::from_nanos(1), 1)
1835    )]
1836    fn test_slack_gt(one: TimerDuration, other: TimerDuration) {
1837        assert_gt!(one, other);
1838    }
1839
1840    #[test_case(
1841        vec![zx::BootDuration::from_nanos(1)],
1842        100,
1843        zx::BootDuration::from_nanos(0),
1844        TimerDuration::new(zx::BootDuration::from_nanos(1), 1) ; "0ns becomes 1ns"
1845    )]
1846    #[test_case(
1847        vec![zx::BootDuration::from_nanos(1)],
1848        100,
1849        zx::BootDuration::from_nanos(50),
1850        TimerDuration::new(zx::BootDuration::from_nanos(1), 50) ; "Exact at 50x1ns"
1851    )]
1852    #[test_case(
1853        vec![zx::BootDuration::from_nanos(2)],
1854        100,
1855        zx::BootDuration::from_nanos(50),
1856        TimerDuration::new(zx::BootDuration::from_nanos(2), 25) ; "Exact at 25x2ns"
1857    )]
1858    #[test_case(
1859        vec![zx::BootDuration::from_nanos(3)],
1860        100,
1861        zx::BootDuration::from_nanos(50),
1862        // The closest duration is 51ns.
1863        TimerDuration::new(zx::BootDuration::from_nanos(3), 17) ; "Inexact at 51ns"
1864    )]
1865    #[test_case(
1866        vec![
1867            zx::BootDuration::from_nanos(3),
1868            zx::BootDuration::from_nanos(4)
1869        ],
1870        100,
1871        zx::BootDuration::from_nanos(50),
1872        TimerDuration::new(zx::BootDuration::from_nanos(3), 17) ; "3ns is a better resolution"
1873    )]
1874    #[test_case(
1875        vec![
1876            zx::BootDuration::from_nanos(1000),
1877        ],
1878        100,
1879        zx::BootDuration::from_nanos(50),
1880        TimerDuration::new(zx::BootDuration::from_nanos(1000), 1) ;
1881        "950ns negative slack is the best we can do"
1882    )]
1883    #[test_case(
1884        vec![
1885            zx::BootDuration::from_nanos(1),
1886        ],
1887        10,
1888        zx::BootDuration::from_nanos(50),
1889        TimerDuration::new(zx::BootDuration::from_nanos(1), 10) ;
1890        "10ns positive slack is the best we can do"
1891    )]
1892    #[test_case(
1893        vec![
1894            zx::BootDuration::from_millis(1),
1895            zx::BootDuration::from_micros(100),
1896            zx::BootDuration::from_micros(10),
1897            zx::BootDuration::from_micros(1),
1898        ],
1899        20,  // Make only one of the resolutions above match.
1900        zx::BootDuration::from_micros(150),
1901        TimerDuration::new(zx::BootDuration::from_micros(10), 15) ;
1902        "Realistic case with resolutions from driver, should be 15us"
1903    )]
1904    #[test_case(
1905        vec![
1906            zx::BootDuration::from_millis(1),
1907            zx::BootDuration::from_micros(100),
1908            zx::BootDuration::from_micros(10),
1909            zx::BootDuration::from_micros(1),
1910        ],
1911        2000,  // Make only one of the resolutions above match.
1912        zx::BootDuration::from_micros(6000),
1913        TimerDuration::new(zx::BootDuration::from_millis(1), 6) ;
1914        "Coarser exact unit wins"
1915    )]
1916    #[test_case(
1917        vec![
1918            zx::BootDuration::from_millis(1),
1919            zx::BootDuration::from_millis(10),
1920            zx::BootDuration::from_millis(100),
1921        ],
1922        1000,
1923        zx::BootDuration::from_micros(-10),
1924        TimerDuration::new(zx::BootDuration::from_millis(1), 1) ;
1925        "Negative duration gets the smallest timer duration"
1926    )]
1927    #[test_case(
1928        vec![
1929            zx::BootDuration::from_millis(1),
1930            zx::BootDuration::from_millis(10),
1931            zx::BootDuration::from_millis(100),
1932        ],
1933        1000,
1934        zx::BootDuration::ZERO,
1935        TimerDuration::new(zx::BootDuration::from_millis(1), 1) ;
1936        "Zero duration gets the smallest timer duration"
1937    )]
1938    fn test_pick_setting(
1939        resolutions: Vec<zx::BootDuration>,
1940        max_ticks: u64,
1941        duration: zx::BootDuration,
1942        expected: TimerDuration,
1943    ) {
1944        let config = TimerConfig::new_from_data(MAIN_TIMER_ID as u64, &resolutions[..], max_ticks);
1945        let actual = config.pick_setting(duration);
1946
1947        // .eq() does not work here, since we do not just require that the values
1948        // be equal, but also that the same resolution is used in both.
1949        assert_slack_eq(expected, actual);
1950    }
1951
1952    // TimerDuration assertion with human-friendly output in case of an error.
1953    fn assert_slack_eq(expected: TimerDuration, actual: TimerDuration) {
1954        let slack = expected.duration() - actual.duration();
1955        assert_eq!(
1956            actual.resolution(),
1957            expected.resolution(),
1958            "\n\texpected: {} ({})\n\tactual  : {} ({})\n\tslack: expected-actual={}",
1959            expected,
1960            format_duration(expected.duration()),
1961            actual,
1962            format_duration(actual.duration()),
1963            format_duration(slack)
1964        );
1965        assert_eq!(
1966            actual.ticks(),
1967            expected.ticks(),
1968            "\n\texpected: {} ({})\n\tactual  : {} ({})\n\tslack: expected-actual={}",
1969            expected,
1970            format_duration(expected.duration()),
1971            actual,
1972            format_duration(actual.duration()),
1973            format_duration(slack)
1974        );
1975    }
1976
1977    #[derive(Debug)]
1978    enum FakeCmd {
1979        SetProperties {
1980            resolutions: Vec<zx::BootDuration>,
1981            max_ticks: i64,
1982            keep_alive: zx::EventPair,
1983            done: zx::Event,
1984        },
1985    }
1986
1987    use std::cell::RefCell;
1988    use std::rc::Rc;
1989
1990    // A fake that emulates some aspects of the hrtimer driver.
1991    //
1992    // Specifically, it can be configured with different resolutions, and will
1993    // bomb out if any waiting method is called twice in succession without
1994    // canceling the timer in between.
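    //
    // A usage sketch (not compiled; mirrors `TestContext::new` below):
    //
    // ```ignore
    // let (tx, rx) = mpsc::channel::<FakeCmd>(0);
    // let fake_proxy = fake_hrtimer_connection(scope.to_handle(), rx);
    // // Send FakeCmd::SetProperties over `tx` to configure resolutions and
    // // max_ticks before exercising the proxy.
    // ```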
1995    fn fake_hrtimer_connection(
1996        scope: fasync::ScopeHandle,
1997        rcv: mpsc::Receiver<FakeCmd>,
1998    ) -> ffhh::DeviceProxy {
1999        debug!("fake_hrtimer_connection: entry.");
2000        let (hrtimer, mut stream) =
2001            fidl::endpoints::create_proxy_and_stream::<ffhh::DeviceMarker>();
2002        scope.clone().spawn_local(async move {
2003            let mut rcv = rcv.fuse();
2004            let timer_properties = Rc::new(RefCell::new(None));
2005            let wake_lease = Rc::new(RefCell::new(None));
2006
2007            // Set to true when the hardware timer is supposed to be running.
2008            // The hardware timer may not be reprogrammed without canceling it first,
2009            // so make sure the tests fail the same way production would.
2010            let timer_running = Rc::new(RefCell::new(false));
2011
2012            loop {
2013                let timer_properties = timer_properties.clone();
2014                let wake_lease = wake_lease.clone();
2015                select! {
2016                    cmd = rcv.next() => {
2017                        debug!("fake_hrtimer_connection: cmd: {:?}", cmd);
2018                        match cmd {
2019                            Some(FakeCmd::SetProperties{ resolutions, max_ticks, keep_alive, done}) => {
2020                                let mut timer_props = vec![];
2021                                for v in 0..10 {
2022                                    timer_props.push(ffhh::TimerProperties {
2023                                        supported_resolutions: Some(
2024                                            resolutions.iter()
2025                                                .map(|d| ffhh::Resolution::Duration(d.into_nanos())).collect()),
2026                                        max_ticks: Some(max_ticks.try_into().unwrap()),
2027                                        // The start_and_wait method is supported.
2028                                        supports_wait: Some(true),
2029                                        id: Some(v),
2030                                        ..Default::default()
2031                                        },
2032                                    );
2033                                }
2034                                *timer_properties.borrow_mut() = Some(timer_props);
2035                                *wake_lease.borrow_mut() = Some(keep_alive);
2036                                debug!("set timer properties to: {:?}", timer_properties);
2037                                signal(&done);
2038                            }
2039                            e => {
2040                                panic!("unrecognized command: {:?}", e);
2041                            }
2042                        }
2043                        // Set some responses if we have them.
2044                    },
2045                    event = stream.next() => {
2046                        debug!("fake_hrtimer_connection: event: {:?}", event);
2047                        if let Some(Ok(event)) = event {
2048                            match event {
2049                                ffhh::DeviceRequest::Start { responder, .. } => {
2050                                    assert!(!*timer_running.borrow(), "invariant broken: timer may not be running here");
2051                                    *timer_running.borrow_mut() = true;
2052                                    responder.send(Ok(())).expect("");
2053                                }
2054                                ffhh::DeviceRequest::Stop { responder, .. } => {
2055                                    *timer_running.borrow_mut() = false;
2056                                    responder.send(Ok(())).expect("");
2057                                }
2058                                ffhh::DeviceRequest::GetTicksLeft { responder, .. } => {
2059                                    responder.send(Ok(1)).expect("");
2060                                }
2061                                ffhh::DeviceRequest::SetEvent { responder, .. } => {
2062                                    responder.send(Ok(())).expect("");
2063                                }
2064                                ffhh::DeviceRequest::StartAndWait { id, resolution, ticks, setup_event, responder, .. } => {
2065                                    assert!(!*timer_running.borrow(), "invariant broken: timer may not be running here");
2066                                    *timer_running.borrow_mut() = true;
2067                                    debug!("fake_hrtimer_connection: starting timer: \"{}\", resolution: {:?}, ticks: {}", id, resolution, ticks);
2068                                    let ticks: i64 = ticks.try_into().unwrap();
2069                                    let sleep_duration  = zx::BootDuration::from_nanos(ticks * match resolution {
2070                                        ffhh::Resolution::Duration(e) => e,
2071                                        _ => {
2072                                            error!("resolution has an unexpected value");
2073                                            1
2074                                        }
2075                                    });
2076                                    let timer_running_clone = timer_running.clone();
2077                                    scope.spawn_local(async move {
2078                                        // Signaling the setup event allows the client to proceed
2079                                        // with post-scheduling work.
2080                                        signal(&setup_event);
2081
2082                                        // Respond after the requested sleep time. In tests, this
2083                                        // sleeps in fake time.
2084                                        fasync::Timer::new(sleep_duration).await;
2085                                        *timer_running_clone.borrow_mut() = false;
2086                                        responder.send(Ok(clone_handle(wake_lease.borrow().as_ref().unwrap()))).unwrap();
2087                                        debug!("StartAndWait: hrtimer expired");
2088                                    });
2089                                }
2090                                ffhh::DeviceRequest::StartAndWait2 { responder, .. } => {
2091                                    assert!(!*timer_running.borrow(), "invariant broken: timer may not be running here");
2092                                    *timer_running.borrow_mut() = true;
2093                                    responder.send(Err(ffhh::DriverError::InternalError)).expect("");
2094                                }
2095                                ffhh::DeviceRequest::GetProperties { responder, .. } => {
2096                                    if (*timer_properties).borrow().is_none() {
2097                                        error!("timer_properties is empty, this is not what you want!");
2098                                    }
2099                                    responder
2100                                        .send(ffhh::Properties {
2101                                            timers_properties: (*timer_properties).borrow().clone(),
2102                                            ..Default::default()
2103                                        })
2104                                        .expect("");
2105                                }
2106                                ffhh::DeviceRequest::ReadTimer { responder, .. } => {
2107                                    responder.send(Err(ffhh::DriverError::NotSupported)).expect("");
2108                                }
2109                                ffhh::DeviceRequest::ReadClock { responder, .. } => {
2110                                    responder.send(Err(ffhh::DriverError::NotSupported)).expect("");
2111                                }
2112                                ffhh::DeviceRequest::_UnknownMethod { .. } => todo!(),
2113                            }
2114                        }
2115                    },
2116                }
2117            }
2118        });
2119        hrtimer
2120    }
2121
2122    fn clone_utc_clock(orig: &fxr::UtcClock) -> fxr::UtcClock {
2123        orig.duplicate_handle(zx::Rights::SAME_RIGHTS).unwrap()
2124    }
2125
2126    struct TestContext {
2127        wake_proxy: fta::WakeAlarmsProxy,
2128        _scope: fasync::Scope,
2129        _cmd_tx: mpsc::Sender<FakeCmd>,
2130        // Used to manipulate the UTC clock from the test.
2131        utc_clock: fxr::UtcClock,
2132        utc_backstop: fxr::UtcInstant,
2133    }
2134
2135    impl TestContext {
2136        async fn new() -> Self {
2137            TestExecutor::advance_to(fasync::MonotonicInstant::from_nanos(0)).await;
2138
2139            let scope = fasync::Scope::new();
2140            let utc_backstop = fxr::UtcInstant::from_nanos(1000);
2141            let utc_clock =
2142                fxr::UtcClock::create(zx::ClockOpts::empty(), Some(utc_backstop)).unwrap();
2143            let utc_clone = clone_utc_clock(&utc_clock);
2144            let (mut cmd_tx, wake_proxy) = {
2145                let (tx, rx) = mpsc::channel::<FakeCmd>(0);
2146                let hrtimer_proxy = fake_hrtimer_connection(scope.to_handle(), rx);
2147
2148                let inspector = finspect::component::inspector();
2149                let alarms = Rc::new(Loop::new(
2150                    scope.to_handle(),
2151                    hrtimer_proxy,
2152                    inspector.root().create_child("test"),
2153                    utc_clone,
2154                ));
2155
2156                let (proxy, stream) =
2157                    fidl::endpoints::create_proxy_and_stream::<fta::WakeAlarmsMarker>();
2158                scope.spawn_local(async move {
2159                    serve(alarms, stream).await;
2160                });
2161                (tx, proxy)
2162            };
2163
2164            let (_wake_lease, peer) = zx::EventPair::create();
2165            let done = zx::Event::create();
2166            cmd_tx
2167                .start_send(FakeCmd::SetProperties {
2168                    resolutions: vec![zx::Duration::from_nanos(1)],
2169                    max_ticks: 100,
2170                    keep_alive: peer,
2171                    done: done.duplicate_handle(zx::Rights::SAME_RIGHTS).unwrap(),
2172                })
2173                .unwrap();
2174
2175            // Wait until hrtimer configuration has completed.
2176            assert_matches!(fasync::OnSignals::new(done, zx::Signals::EVENT_SIGNALED).await, Ok(_));
2177
2178            Self { wake_proxy, _scope: scope, _cmd_tx: cmd_tx, utc_clock, utc_backstop }
2179        }
2180    }
2181
2182    impl Drop for TestContext {
2183        fn drop(&mut self) {
2184            assert_matches!(TestExecutor::next_timer(), None, "Unexpected lingering timers");
2185        }
2186    }
2187
2188    #[fuchsia::test(allow_stalls = false)]
2189    async fn test_basic_timed_wait() {
2190        let ctx = TestContext::new().await;
2191
2192        let deadline = zx::BootInstant::from_nanos(100);
2193        let setup_done = zx::Event::create();
2194        let mut set_task = ctx.wake_proxy.set_and_wait(
2195            deadline.into(),
2196            fta::SetMode::NotifySetupDone(
2197                setup_done.duplicate_handle(zx::Rights::SAME_RIGHTS).unwrap(),
2198            ),
2199            "Hello".into(),
2200        );
2201
2202        assert_matches!(TestExecutor::poll_until_stalled(&mut set_task).await, Poll::Pending);
2203
2204        let mut setup_done_task = fasync::OnSignals::new(setup_done, zx::Signals::EVENT_SIGNALED);
2205        assert_matches!(
2206            TestExecutor::poll_until_stalled(&mut setup_done_task).await,
2207            Poll::Ready(Ok(_)),
2208            "Setup event not triggered after scheduling an alarm"
2209        );
2210
2211        TestExecutor::advance_to(fasync::MonotonicInstant::from_nanos(100)).await;
2212        assert_matches!(TestExecutor::poll_until_stalled(set_task).await, Poll::Ready(Ok(Ok(_))));
2213    }
2214
2215    #[fuchsia::test(allow_stalls = false)]
2216    async fn test_basic_timed_wait_notify() {
2217        const ALARM_ID: &str = "Hello";
2218        let ctx = TestContext::new().await;
2219
2220        let (notifier_client, mut notifier_stream) =
2221            fidl::endpoints::create_request_stream::<fta::NotifierMarker>();
2222        let setup_done = zx::Event::create();
2223        assert_matches!(
2224            ctx.wake_proxy
2225                .set(
2226                    notifier_client,
2227                    fidl::BootInstant::from_nanos(2),
2228                    fta::SetMode::NotifySetupDone(
2229                        setup_done.duplicate_handle(zx::Rights::SAME_RIGHTS).unwrap()
2230                    ),
2231                    ALARM_ID,
2232                )
2233                .await,
2234            Ok(Ok(()))
2235        );
2236
2237        let mut done_task = fasync::OnSignals::new(setup_done, zx::Signals::EVENT_SIGNALED);
2238        assert_matches!(
2239            TestExecutor::poll_until_stalled(&mut done_task).await,
2240            Poll::Ready(Ok(_)),
2241            "Setup event not triggered after scheduling an alarm"
2242        );
2243
2244        let mut next_task = notifier_stream.next();
2245        assert_matches!(TestExecutor::poll_until_stalled(&mut next_task).await, Poll::Pending);
2246
2247        TestExecutor::advance_to(fasync::MonotonicInstant::from_nanos(1)).await;
2248        assert_matches!(TestExecutor::poll_until_stalled(&mut next_task).await, Poll::Pending);
2249
2250        TestExecutor::advance_to(fasync::MonotonicInstant::from_nanos(2)).await;
2251        assert_matches!(
2252            TestExecutor::poll_until_stalled(next_task).await,
2253            Poll::Ready(Some(Ok(fta::NotifierRequest::Notify { alarm_id, .. }))) if alarm_id == ALARM_ID
2254        );
2255    }
2256
2257    #[fuchsia::test(allow_stalls = false)]
2258    async fn test_two_alarms_same() {
2259        const DEADLINE_NANOS: i64 = 100;
2260
2261        let ctx = TestContext::new().await;
2262
2263        let mut set_task_1 = ctx.wake_proxy.set_and_wait(
2264            fidl::BootInstant::from_nanos(DEADLINE_NANOS),
2265            fta::SetMode::KeepAlive(fake_wake_lease()),
2266            "Hello1".into(),
2267        );
2268        let mut set_task_2 = ctx.wake_proxy.set_and_wait(
2269            fidl::BootInstant::from_nanos(DEADLINE_NANOS),
2270            fta::SetMode::KeepAlive(fake_wake_lease()),
2271            "Hello2".into(),
2272        );
2273
2274        assert_matches!(TestExecutor::poll_until_stalled(&mut set_task_1).await, Poll::Pending);
2275        assert_matches!(TestExecutor::poll_until_stalled(&mut set_task_2).await, Poll::Pending);
2276
2277        TestExecutor::advance_to(fasync::MonotonicInstant::from_nanos(DEADLINE_NANOS)).await;
2278
2279        assert_matches!(
2280            TestExecutor::poll_until_stalled(&mut set_task_1).await,
2281            Poll::Ready(Ok(Ok(_)))
2282        );
2283        assert_matches!(
2284            TestExecutor::poll_until_stalled(&mut set_task_2).await,
2285            Poll::Ready(Ok(Ok(_)))
2286        );
2287    }
2288
2289    #[fuchsia::test(allow_stalls = false)]
2290    async fn test_two_alarms_same_notify() {
2291        const DEADLINE_NANOS: i64 = 100;
2292        const ALARM_ID_1: &str = "Hello1";
2293        const ALARM_ID_2: &str = "Hello2";
2294
2295        let ctx = TestContext::new().await;
2296
2297        let schedule = async |deadline_nanos: i64, alarm_id: &str| {
2298            let (notifier_client, notifier_stream) =
2299                fidl::endpoints::create_request_stream::<fta::NotifierMarker>();
2300            assert_matches!(
2301                ctx.wake_proxy
2302                    .set(
2303                        notifier_client,
2304                        fidl::BootInstant::from_nanos(deadline_nanos),
2305                        fta::SetMode::KeepAlive(fake_wake_lease()),
2306                        alarm_id,
2307                    )
2308                    .await,
2309                Ok(Ok(()))
2310            );
2311            notifier_stream
2312        };
2313
2314        let mut notifier_1 = schedule(DEADLINE_NANOS, ALARM_ID_1).await;
2315        let mut notifier_2 = schedule(DEADLINE_NANOS, ALARM_ID_2).await;
2316
2317        let mut next_task_1 = notifier_1.next();
2318        let mut next_task_2 = notifier_2.next();
2319
2320        assert_matches!(TestExecutor::poll_until_stalled(&mut next_task_1).await, Poll::Pending);
2321        assert_matches!(TestExecutor::poll_until_stalled(&mut next_task_2).await, Poll::Pending);
2322
2323        TestExecutor::advance_to(fasync::MonotonicInstant::from_nanos(DEADLINE_NANOS)).await;
2324
2325        assert_matches!(
2326            TestExecutor::poll_until_stalled(&mut next_task_1).await,
2327            Poll::Ready(Some(Ok(fta::NotifierRequest::Notify { alarm_id, .. }))) if alarm_id == ALARM_ID_1
2328        );
2329        assert_matches!(
2330            TestExecutor::poll_until_stalled(&mut next_task_2).await,
2331            Poll::Ready(Some(Ok(fta::NotifierRequest::Notify { alarm_id, .. }))) if alarm_id == ALARM_ID_2
2332        );
2333
2334        assert_matches!(
2335            TestExecutor::poll_until_stalled(notifier_1.next()).await,
2336            Poll::Ready(None)
2337        );
2338        assert_matches!(
2339            TestExecutor::poll_until_stalled(notifier_2.next()).await,
2340            Poll::Ready(None)
2341        );
2342    }
2343
2344    #[test_case(100, 200 ; "push out")]
2345    #[test_case(200, 100 ; "pull in")]
2346    #[fuchsia::test(allow_stalls = false)]
2347    async fn test_two_alarms_different(
2348        // One timer scheduled at this instant (fake time starts from zero).
2349        first_deadline_nanos: i64,
2350        // Another timer scheduled at this instant.
2351        second_deadline_nanos: i64,
2352    ) {
2353        let ctx = TestContext::new().await;
2354
2355        let mut set_task_1 = ctx.wake_proxy.set_and_wait(
2356            fidl::BootInstant::from_nanos(first_deadline_nanos),
2357            fta::SetMode::KeepAlive(fake_wake_lease()),
2358            "Hello1".into(),
2359        );
2360        let mut set_task_2 = ctx.wake_proxy.set_and_wait(
2361            fidl::BootInstant::from_nanos(second_deadline_nanos),
2362            fta::SetMode::KeepAlive(fake_wake_lease()),
2363            "Hello2".into(),
2364        );
2365
2366        assert_matches!(TestExecutor::poll_until_stalled(&mut set_task_1).await, Poll::Pending);
2367        assert_matches!(TestExecutor::poll_until_stalled(&mut set_task_2).await, Poll::Pending);
2368
2369        // Sort alarms by their deadlines.
2370        let mut tasks = [(first_deadline_nanos, set_task_1), (second_deadline_nanos, set_task_2)];
2371        tasks.sort_by(|a, b| a.0.cmp(&b.0));
2372        let [mut first_task, mut second_task] = tasks;
2373
2374        // Alarms should fire in order of deadlines.
2375        TestExecutor::advance_to(fasync::MonotonicInstant::from_nanos(first_task.0)).await;
2376        assert_matches!(
2377            TestExecutor::poll_until_stalled(&mut first_task.1).await,
2378            Poll::Ready(Ok(Ok(_)))
2379        );
2380        assert_matches!(TestExecutor::poll_until_stalled(&mut second_task.1).await, Poll::Pending);
2381
2382        TestExecutor::advance_to(fasync::MonotonicInstant::from_nanos(second_task.0)).await;
2383        assert_matches!(
2384            TestExecutor::poll_until_stalled(&mut second_task.1).await,
2385            Poll::Ready(Ok(Ok(_)))
2386        );
2387    }
2388
2389    #[test_case(100, 200 ; "push out")]
2390    #[test_case(200, 100 ; "pull in")]
2391    #[fuchsia::test(allow_stalls = false)]
2392    async fn test_two_alarms_different_notify(
2393        // One timer scheduled at this instant (fake time starts from zero).
2394        first_deadline_nanos: i64,
2395        // Another timer scheduled at this instant.
2396        second_deadline_nanos: i64,
2397    ) {
2398        const ALARM_ID_1: &str = "Hello1";
2399        const ALARM_ID_2: &str = "Hello2";
2400
2401        let ctx = TestContext::new().await;
2402
2403        let schedule = async |deadline_nanos: i64, alarm_id: &str| {
2404            let (notifier_client, notifier_stream) =
2405                fidl::endpoints::create_request_stream::<fta::NotifierMarker>();
2406            assert_matches!(
2407                ctx.wake_proxy
2408                    .set(
2409                        notifier_client,
2410                        fidl::BootInstant::from_nanos(deadline_nanos),
2411                        fta::SetMode::KeepAlive(fake_wake_lease()),
2412                        alarm_id,
2413                    )
2414                    .await,
2415                Ok(Ok(()))
2416            );
2417            notifier_stream
2418        };
2419
2420        // Merge the notifier streams, and sort the alarms by their deadlines.
2421        let mut notifier_all = futures::stream::select_all([
2422            schedule(first_deadline_nanos, ALARM_ID_1).await,
2423            schedule(second_deadline_nanos, ALARM_ID_2).await,
2424        ]);
2425        let [(early_ns, early_alarm), (later_ns, later_alarm)] = {
2426            let mut tasks =
2427                [(first_deadline_nanos, ALARM_ID_1), (second_deadline_nanos, ALARM_ID_2)];
2428            tasks.sort_by(|a, b| a.0.cmp(&b.0));
2429            tasks
2430        };
2431
2432        // Alarms should fire in order of deadlines.
2433        let mut next_task = notifier_all.next();
2434        assert_matches!(TestExecutor::poll_until_stalled(&mut next_task).await, Poll::Pending);
2435
2436        TestExecutor::advance_to(fasync::MonotonicInstant::from_nanos(early_ns)).await;
2437        assert_matches!(
2438            TestExecutor::poll_until_stalled(next_task).await,
2439            Poll::Ready(Some(Ok(fta::NotifierRequest::Notify { alarm_id, .. }))) if alarm_id == early_alarm
2440        );
2441
2442        let mut next_task = notifier_all.next();
2443        assert_matches!(TestExecutor::poll_until_stalled(&mut next_task).await, Poll::Pending);
2444
2445        TestExecutor::advance_to(fasync::MonotonicInstant::from_nanos(later_ns)).await;
2446        assert_matches!(
2447            TestExecutor::poll_until_stalled(next_task).await,
2448            Poll::Ready(Some(Ok(fta::NotifierRequest::Notify { alarm_id, .. }))) if alarm_id == later_alarm
2449        );
2450        assert_matches!(
2451            TestExecutor::poll_until_stalled(notifier_all.next()).await,
2452            Poll::Ready(None)
2453        );
2454    }
2455
2456    #[fuchsia::test(allow_stalls = false)]
2457    async fn test_alarm_immediate() {
2458        let ctx = TestContext::new().await;
2459        let mut set_task = ctx.wake_proxy.set_and_wait(
2460            fidl::BootInstant::INFINITE_PAST,
2461            fta::SetMode::KeepAlive(fake_wake_lease()),
2462            "Hello1".into(),
2463        );
2464        assert_matches!(
2465            TestExecutor::poll_until_stalled(&mut set_task).await,
2466            Poll::Ready(Ok(Ok(_)))
2467        );
2468    }
2469
2470    #[fuchsia::test(allow_stalls = false)]
2471    async fn test_alarm_immediate_notify() {
2472        const ALARM_ID: &str = "Hello";
2473        let ctx = TestContext::new().await;
2474
2475        let (notifier_client, mut notifier_stream) =
2476            fidl::endpoints::create_request_stream::<fta::NotifierMarker>();
2477
2478        let mut set_task = ctx.wake_proxy.set(
2479            notifier_client,
2480            fidl::BootInstant::INFINITE_PAST,
2481            fta::SetMode::KeepAlive(fake_wake_lease()),
2482            ALARM_ID,
2483        );
2484        assert_matches!(
2485            TestExecutor::poll_until_stalled(&mut set_task).await,
2486            Poll::Ready(Ok(Ok(_)))
2487        );
2488        assert_matches!(
2489            TestExecutor::poll_until_stalled(notifier_stream.next()).await,
2490            Poll::Ready(Some(Ok(fta::NotifierRequest::Notify { alarm_id, .. }))) if alarm_id == ALARM_ID
2491        );
2492    }
2493
2494    // Rescheduling a timer will cancel the earlier call and use the new
2495    // deadline for the later call.
2496    #[test_case(200, 100 ; "pull in")]
2497    #[test_case(100, 200 ; "push out")]
2498    #[test_case(100, 100 ; "replace with the same deadline")]
2499    #[fuchsia::test(allow_stalls = false)]
2500    async fn test_reschedule(initial_deadline_nanos: i64, override_deadline_nanos: i64) {
2501        const ALARM_ID: &str = "Hello";
2502
2503        let ctx = TestContext::new().await;
2504
2505        let schedule = |deadline_nanos: i64| {
2506            let setup_done = zx::Event::create();
2507            let task = ctx.wake_proxy.set_and_wait(
2508                fidl::BootInstant::from_nanos(deadline_nanos),
2509                fta::SetMode::NotifySetupDone(
2510                    setup_done.duplicate_handle(zx::Rights::SAME_RIGHTS).unwrap(),
2511                ),
2512                ALARM_ID.into(),
2513            );
2514            (task, setup_done)
2515        };
2516
2517        // Schedule a timer with the initial deadline first. Let it wait, then
2518        // try to reschedule the same timer.
2519        let (mut set_task_1, setup_done_1) = schedule(initial_deadline_nanos);
2520        fasync::OnSignals::new(setup_done_1, zx::Signals::EVENT_SIGNALED).await.unwrap();
2521        assert_matches!(TestExecutor::poll_until_stalled(&mut set_task_1).await, Poll::Pending);
2522
2523        // Schedule the same timer again, this time with the override deadline
2524        // (earlier, later, or identical). This cancels the earlier call.
2525        let (mut set_task_2, setup_done_2) = schedule(override_deadline_nanos);
2526        fasync::OnSignals::new(setup_done_2, zx::Signals::EVENT_SIGNALED).await.unwrap();
2527        assert_matches!(
2528            TestExecutor::poll_until_stalled(&mut set_task_1).await,
2529            Poll::Ready(Ok(Err(fta::WakeAlarmsError::Dropped)))
2530        );
2531        assert_matches!(TestExecutor::poll_until_stalled(&mut set_task_2).await, Poll::Pending);
2532
2533        // The later call fires exactly at the override deadline, and not before.
2534        TestExecutor::advance_to(fasync::MonotonicInstant::from_nanos(override_deadline_nanos - 1))
2535            .await;
2536        assert_matches!(TestExecutor::poll_until_stalled(&mut set_task_2).await, Poll::Pending);
2537        TestExecutor::advance_to(fasync::MonotonicInstant::from_nanos(override_deadline_nanos))
2538            .await;
2539        assert_matches!(
2540            TestExecutor::poll_until_stalled(&mut set_task_2).await,
2541            Poll::Ready(Ok(Ok(_)))
2542        );
2543
2544        // The values in the inspector tree are fixed because the test
2545        // runs fully deterministically in fake time.
2546        assert_data_tree!(finspect::component::inspector(), root: {
2547            test: {
2548                hardware: {
2549                    // All alarms have fired, so both fields read "(none)".
2550                    current_deadline: "(none)",
2551                    remaining_until_alarm: "(none)",
2552                },
2553                now_formatted: format!("{override_deadline_nanos}ns ({override_deadline_nanos})"),
2554                now_ns: override_deadline_nanos,
2555                pending_timers: "Boot:\n\t\n\tUTC:\n\t",
2556                pending_timers_count: 0u64,
2557                requested_deadlines_ns: AnyProperty,
2558                schedule_delay_ns: AnyProperty,
2559                slack_ns: AnyProperty,
2560                boot_deadlines_count: AnyProperty,
2561                utc_deadlines_count: AnyProperty,
2562                debug_node: contains {},
2563            },
2564        });
2565    }
2566
2567    // Rescheduling a timer will send an error on the old notifier and use the
2568    // new notifier for the new deadline.
2569    #[fuchsia::test(allow_stalls = false)]
2570    async fn test_reschedule_notify() {
2571        const ALARM_ID: &str = "Hello";
2572        const INITIAL_DEADLINE_NANOS: i64 = 100;
2573        const OVERRIDE_DEADLINE_NANOS: i64 = 200;
2574
2575        let ctx = TestContext::new().await;
2576
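        // Schedules an alarm with a fresh Notifier and returns that notifier's request stream.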
2577        let schedule = async |deadline_nanos: i64| {
2578            let (notifier_client, notifier_stream) =
2579                fidl::endpoints::create_request_stream::<fta::NotifierMarker>();
2580            assert_matches!(
2581                ctx.wake_proxy
2582                    .set(
2583                        notifier_client,
2584                        fidl::BootInstant::from_nanos(deadline_nanos),
2585                        fta::SetMode::KeepAlive(fake_wake_lease()),
2586                        ALARM_ID.into(),
2587                    )
2588                    .await,
2589                Ok(Ok(()))
2590            );
2591            notifier_stream
2592        };
2593
2594        let mut notifier_1 = schedule(INITIAL_DEADLINE_NANOS).await;
2595        let mut next_task_1 = notifier_1.next();
2596        assert_matches!(TestExecutor::poll_until_stalled(&mut next_task_1).await, Poll::Pending);
2597
2598        let mut notifier_2 = schedule(OVERRIDE_DEADLINE_NANOS).await;
2599        let mut next_task_2 = notifier_2.next();
2600        assert_matches!(TestExecutor::poll_until_stalled(&mut next_task_2).await, Poll::Pending);
2601
2602        // The first notifier is called with a Dropped error, then its channel is closed.
2603        assert_matches!(
2604            TestExecutor::poll_until_stalled(&mut next_task_1).await,
2605            Poll::Ready(Some(Ok(fta::NotifierRequest::NotifyError { alarm_id, error, .. }))) if alarm_id == ALARM_ID && error == fta::WakeAlarmsError::Dropped
2606        );
2607        assert_matches!(
2608            TestExecutor::poll_until_stalled(notifier_1.next()).await,
2609            Poll::Ready(None)
2610        );
2611
2612        // The second notifier is called at the new deadline, then its channel is closed.
2613        TestExecutor::advance_to(fasync::MonotonicInstant::from_nanos(OVERRIDE_DEADLINE_NANOS))
2614            .await;
2615        assert_matches!(
2616            TestExecutor::poll_until_stalled(next_task_2).await,
2617            Poll::Ready(Some(Ok(fta::NotifierRequest::Notify { alarm_id, .. }))) if alarm_id == ALARM_ID
2618        );
2619        assert_matches!(
2620            TestExecutor::poll_until_stalled(notifier_2.next()).await,
2621            Poll::Ready(None)
2622        );
2623    }
2624
2625    // If we get two scheduling FIDL errors one after another, the wake alarm
2626    // manager must not lock up.
2627    #[fuchsia::test(allow_stalls = false)]
2628    async fn test_fidl_error_on_reschedule() {
2629        const DEADLINE_NANOS: i64 = 100;
2630
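        // Create a proxy and immediately drop its server end, so every call on the
        // proxy fails with a channel-closed FIDL error.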
2631        let (wake_proxy, _stream) =
2632            fidl::endpoints::create_proxy_and_stream::<fta::WakeAlarmsMarker>();
2633        drop(_stream);
2634
2635        assert_matches!(
2636            wake_proxy
2637                .set_and_wait(
2638                    zx::BootInstant::from_nanos(DEADLINE_NANOS).into(),
2639                    fta::SetMode::KeepAlive(fake_wake_lease()),
2640                    "hello1".into(),
2641                )
2642                .await,
2643            Err(fidl::Error::ClientChannelClosed { .. })
2644        );
2645
2646        assert_matches!(
2647            wake_proxy
2648                .set_and_wait(
2649                    zx::BootInstant::from_nanos(DEADLINE_NANOS).into(),
2650                    fta::SetMode::KeepAlive(fake_wake_lease()),
2651                    "hello2".into(),
2652                )
2653                .await,
2654            Err(fidl::Error::ClientChannelClosed { .. })
2655        );
2656    }
2657
2658    // Verify that if a UTC timer is scheduled in the future on the UTC timeline, and the
2659    // UTC clock is then adjusted so that "now" moves past the timer's deadline, the timer fires.
2660    #[fuchsia::test(allow_stalls = false)]
2661    async fn test_set_and_wait_utc() {
2662        const ALARM_ID: &str = "Hello_set_and_wait_utc";
2663        let ctx = TestContext::new().await;
2664
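        // Start the UTC clock at its backstop, anchored at the current boot instant.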
2665        let now_boot = fasync::BootInstant::now();
2666        ctx.utc_clock
2667            .update(
2668                zx::ClockUpdate::builder()
2669                    .absolute_value(now_boot.into(), ctx.utc_backstop)
2670                    .build(),
2671            )
2672            .unwrap();
2673
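        // Pick a UTC deadline slightly past the backstop, i.e. still in the future.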
2674        let timestamp_utc = ctx.utc_backstop + fxr::UtcDuration::from_nanos(2);
2675        let mut wake_fut = ctx.wake_proxy.set_and_wait_utc(
2676            &fta::InstantUtc { timestamp_utc: timestamp_utc.into_nanos() },
2677            fta::SetMode::KeepAlive(fake_wake_lease()),
2678            ALARM_ID,
2679        );
2680
2681        // The timer has not expired yet.
2682        assert_matches!(TestExecutor::poll_until_stalled(&mut wake_fut).await, Poll::Pending);
2683
2684        // Move the UTC timeline forward, past the timer's deadline.
2685        ctx.utc_clock
2686            .update(
2687                zx::ClockUpdate::builder()
2688                    .absolute_value(
2689                        now_boot.into(),
2690                        ctx.utc_backstop + fxr::UtcDuration::from_nanos(100),
2691                    )
2692                    .build(),
2693            )
2694            .unwrap();
2695
2696        // As in the test above, nudge fake time forward so the pending alarm can be processed.
2697        TestExecutor::advance_to(fasync::MonotonicInstant::from_nanos(1)).await;
2698        assert_matches!(TestExecutor::poll_until_stalled(wake_fut).await, Poll::Ready(_));
2699    }
2700}