alarms/lib.rs

1// Copyright 2024 The Fuchsia Authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
//! Alarm management subsystem.
//!
//! This subsystem serves the FIDL API `fuchsia.time.alarms/Wake`. To instantiate,
//! you can use the following approach:
//!
//! ```ignore
//! let proxy = client::connect_to_protocol::<ffhh::DeviceMarker>().map_err(
//!     |e| error!("error: {}", e)).expect("add proper error handling");
//! let timer_loop = alarms::Handle::new(proxy);
//! ```
15//!
16//! From here, use the standard approach with [ServiceFs::new] to expose the
17//! discoverable FIDL endpoint and call:
18//!
19//! ```ignore
20//! let stream: fidl_fuchsia_time_alarms::WakeRequestStream = ... ;
21//! alarms::serve(timer_loop, stream).await;
22//! // ...
23//! ```
24//!
25//! Of course, for everything to work well, your component will need appropriate
26//! capability routing.  Refer to capability routing docs for those details.
27
28use anyhow::{anyhow, Result};
29use fidl::encoding::ProxyChannelBox;
30use fidl::endpoints::RequestStream;
31use fidl::HandleBased;
32use fuchsia_inspect::{HistogramProperty, Property};
33use futures::channel::mpsc;
34use futures::sink::SinkExt;
35use futures::StreamExt;
36use log::{debug, error, warn};
37use scopeguard::defer;
38use std::cell::RefCell;
39use std::cmp;
40use std::collections::{BTreeMap, BinaryHeap, HashMap};
41use std::rc::Rc;
42use std::sync::LazyLock;
43use zx::AsHandleRef;
44use {
45    fidl_fuchsia_hardware_hrtimer as ffhh, fidl_fuchsia_time_alarms as fta,
46    fuchsia_async as fasync, fuchsia_inspect as finspect, fuchsia_trace as trace,
47};
48
49static I64_MAX_AS_U64: LazyLock<u64> = LazyLock::new(|| i64::MAX.try_into().expect("infallible"));
50static I32_MAX_AS_U64: LazyLock<u64> = LazyLock::new(|| i32::MAX.try_into().expect("infallible"));
51
52/// The largest value of timer "ticks" that is still considered useful.
53static MAX_USEFUL_TICKS: LazyLock<u64> = LazyLock::new(|| *I32_MAX_AS_U64);
54
55/// The hrtimer ID used for scheduling wake alarms.  This ID is reused from
56/// Starnix, and should eventually no longer be critical.
57const MAIN_TIMER_ID: usize = 6;
58
59/// TODO(b/383062441): remove this special casing once Starnix hrtimer is fully
60/// migrated to multiplexed timer.
61/// A special-cased Starnix timer ID, used to allow cross-connection setup
62/// for Starnix only.
63const TEMPORARY_STARNIX_TIMER_ID: &str = "starnix-hrtimer";
64static TEMPORARY_STARNIX_CID: LazyLock<zx::Event> = LazyLock::new(|| zx::Event::create());
65
66// This may be already handled by something, but I don't want new deps.
67const USEC_IN_NANOS: i64 = 1000;
68const MSEC_IN_NANOS: i64 = 1000 * USEC_IN_NANOS;
69const SEC_IN_NANOS: i64 = 1000 * MSEC_IN_NANOS;
70const MIN_IN_NANOS: i64 = SEC_IN_NANOS * 60;
71const HOUR_IN_NANOS: i64 = MIN_IN_NANOS * 60;
72const DAY_IN_NANOS: i64 = HOUR_IN_NANOS * 24;
73const WEEK_IN_NANOS: i64 = DAY_IN_NANOS * 7;
74const YEAR_IN_NANOS: i64 = DAY_IN_NANOS * 365; // Approximate.
75
76static UNITS: LazyLock<Vec<(i64, &'static str)>> = LazyLock::new(|| {
77    vec![
78        (YEAR_IN_NANOS, "year(s)"),
79        (WEEK_IN_NANOS, "week(s)"),
80        (DAY_IN_NANOS, "day(s)"),
81        (HOUR_IN_NANOS, "h"),
82        (MIN_IN_NANOS, "min"),
83        (SEC_IN_NANOS, "s"),
84        (MSEC_IN_NANOS, "ms"),
85        (USEC_IN_NANOS, "μs"),
86        (1, "ns"),
87    ]
88});
89
// Formats a time value into a simplistic human-readable string.  This is meant
// to be a human-friendly, but not impeccable, format.
92fn format_common(mut value: i64) -> String {
93    let value_copy = value;
94    let mut repr: Vec<String> = vec![];
95    for (unit_value, unit_str) in UNITS.iter() {
96        if value == 0 {
97            break;
98        }
99        let num_units = value / unit_value;
100        if num_units.abs() > 0 {
101            repr.push(format!("{}{}", num_units, unit_str));
102            value = value % unit_value;
103        }
104    }
105    if repr.len() == 0 {
106        repr.push("0ns".to_string());
107    }
    // 1year(s)_3week(s)_4day(s)_1h_2min_340ms. Not ideal but user-friendly enough.
109    let repr = repr.join("_");
110
111    let mut ret = vec![];
112    ret.push(repr);
113    // Also add the full nanosecond value too.
114    ret.push(format!("({})", value_copy));
115    ret.join(" ")
116}
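
// A brief illustration of the output shape produced by `format_common`; the
// values below follow from the unit table and the code above and are meant as
// a sketch, not as a format guarantee:
//
// ```ignore
// assert_eq!(format_common(0), "0ns (0)");
// // Nonzero units are listed largest-first, followed by the raw nanosecond
// // value in parentheses.
// assert_eq!(format_common(SEC_IN_NANOS + 2 * MSEC_IN_NANOS), "1s_2ms (1002000000)");
// ```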
117
118// Pretty prints a timer value into a simplistic format.
119fn format_timer<T: zx::Timeline>(timer: zx::Instant<T>) -> String {
120    format_common(timer.into_nanos())
121}
122
123// Pretty prints a duration into a simplistic format.
124fn format_duration<T: zx::Timeline>(duration: zx::Duration<T>) -> String {
125    format_common(duration.into_nanos())
126}
127
/// Compares two optional deadlines and returns true if `before` differs from `after`.
/// `None`s compare as equal.
130fn is_deadline_changed(
131    before: Option<fasync::BootInstant>,
132    after: Option<fasync::BootInstant>,
133) -> bool {
134    match (before, after) {
135        (None, None) => false,
136        (None, Some(_)) | (Some(_), None) => true,
137        (Some(before), Some(after)) => before != after,
138    }
139}
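
// A small sketch of the comparison semantics above, assuming a `from_nanos`
// constructor is available on [fasync::BootInstant]:
//
// ```ignore
// let t = fasync::BootInstant::from_nanos(1_000);
// assert!(!is_deadline_changed(None, None));       // Nones compare as equal.
// assert!(is_deadline_changed(None, Some(t)));     // Gaining a deadline is a change.
// assert!(is_deadline_changed(Some(t), None));     // Losing one is, too.
// assert!(!is_deadline_changed(Some(t), Some(t))); // Identical deadlines are not.
// ```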
140
141/// Stops a currently running hardware timer.
142async fn stop_hrtimer(hrtimer: &ffhh::DeviceProxy, timer_config: &TimerConfig) {
143    trace::duration!(c"alarms", c"hrtimer:stop", "id" => timer_config.id);
144    debug!("stop_hrtimer: stopping hardware timer: {}", timer_config.id);
145    let _ = hrtimer
146        .stop(timer_config.id)
147        .await
148        .map(|result| {
149            let _ = result.map_err(|e| warn!("stop_hrtimer: driver error: {:?}", e));
150        })
151        .map_err(|e| warn!("stop_hrtimer: could not stop prior timer: {}", e));
152    debug!("stop_hrtimer: stopped  hardware timer: {}", timer_config.id);
153}
154
155// The default size of the channels created in this module.
156const CHANNEL_SIZE: usize = 100;
157
158/// A type handed around between the concurrent loops run by this module.
159#[derive(Debug)]
160enum Cmd {
161    /// Request a timer to be started.
162    Start {
163        /// The unique connection ID.
164        cid: zx::Koid,
165        /// A timestamp (presumably in the future), at which to expire the timer.
166        deadline: fasync::BootInstant,
        /// An event signaled by the timer loop once the alarm has been set up
        /// in the hardware timer.
        ///
        /// This is important so that wake alarms can be scheduled before we
        /// allow the system to go to sleep.
172        setup_done: zx::Event,
173        /// An alarm identifier, chosen by the caller.
174        alarm_id: String,
175        /// A responder that will be called when the timer expires. The
176        /// client end of the connection will block until we send something
177        /// on this responder.
178        ///
        /// This is packaged into an Rc... only because both the "happy path"
        /// and the error path must be able to consume the responder.  This
        /// allows either path to consume it, without the responder needing to
        /// implement Default.
182        responder: Rc<RefCell<Option<fta::WakeSetAndWaitResponder>>>,
183    },
184    StopById {
185        done: zx::Event,
186        timer_id: TimerId,
187    },
188    Alarm {
189        expired_deadline: fasync::BootInstant,
190        keep_alive: fidl::EventPair,
191    },
192    AlarmFidlError {
193        expired_deadline: fasync::BootInstant,
194        error: fidl::Error,
195    },
196    AlarmDriverError {
197        expired_deadline: fasync::BootInstant,
198        error: ffhh::DriverError,
199    },
200}
201
202impl std::fmt::Display for Cmd {
203    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
204        match self {
205            Cmd::Start { cid, deadline, alarm_id, .. } => {
206                write!(
207                    f,
208                    "Start[alarm_id=\"{}\", cid={:?}, deadline={}]",
209                    alarm_id,
210                    cid,
211                    format_timer((*deadline).into())
212                )
213            }
214            Cmd::Alarm { expired_deadline, .. } => {
215                write!(f, "Alarm[deadline={}]", format_timer((*expired_deadline).into()))
216            }
217            Cmd::AlarmFidlError { expired_deadline, error } => {
218                write!(
219                    f,
220                    "FIDLError[deadline={}, err={}, NO_WAKE_LEASE!]",
221                    format_timer((*expired_deadline).into()),
222                    error
223                )
224            }
225            Cmd::AlarmDriverError { expired_deadline, error } => {
226                write!(
227                    f,
228                    "DriverError[deadline={}, err={:?}, NO_WAKE_LEASE!]",
229                    format_timer((*expired_deadline).into()),
230                    error
231                )
232            }
233            Cmd::StopById { timer_id, done: _ } => {
234                write!(f, "StopById[timerId={}]", timer_id,)
235            }
236        }
237    }
238}
239
/// Extracts a KOID from the underlying channel of the provided `stream`.
241///
242/// # Returns
243/// - zx::Koid: the KOID you wanted.
244/// - fta::WakeRequestStream: the stream; we had to deconstruct it briefly,
245///   so this gives it back to you.
246pub fn get_stream_koid(stream: fta::WakeRequestStream) -> (zx::Koid, fta::WakeRequestStream) {
247    let (inner, is_terminated) = stream.into_inner();
248    let koid = inner.channel().as_channel().get_koid().expect("infallible");
249    let stream = fta::WakeRequestStream::from_inner(inner, is_terminated);
250    (koid, stream)
251}
252
253/// Serves a single Wake API client.
254pub async fn serve(timer_loop: Rc<Loop>, requests: fta::WakeRequestStream) {
    // The connection ID (cid) is derived from the KOID of the request stream's channel.
256    fasync::Task::local(async move {
257        let timer_loop = timer_loop.clone();
258        let timer_loop_send = || timer_loop.get_sender();
259        let (cid, mut requests) = get_stream_koid(requests);
260        let mut request_count = 0;
261        debug!("alarms::serve: opened connection: {:?}", cid);
262        while let Some(maybe_request) = requests.next().await {
263            request_count += 1;
264            debug!("alarms::serve: cid: {:?} incoming request: {}", cid, request_count);
265            match maybe_request {
266                Ok(request) => {
267                    // Should return quickly.
268                    handle_request(cid, timer_loop_send(), request).await;
269                }
270                Err(e) => {
271                    warn!("alarms::serve: error in request: {:?}", e);
272                }
273            }
274            debug!("alarms::serve: cid: {:?} done request: {}", cid, request_count);
275        }
276        // Check if connection closure was intentional. It is way too easy to close
277        // a FIDL connection inadvertently if doing non-mainstream things with FIDL.
278        warn!("alarms::serve: CLOSED CONNECTION: cid: {:?}", cid);
279    })
280    .detach();
281}
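
// A sketch of exposing the Wake protocol through a `ServiceFs`, following the
// module-level docs above.  The wiring below is illustrative and assumes
// `fuchsia_component::server::ServiceFs` is in scope and that `timer_loop` was
// created as shown in the module docs:
//
// ```ignore
// let mut fs = ServiceFs::new();
// fs.dir("svc").add_fidl_service(|stream: fta::WakeRequestStream| stream);
// fs.take_and_serve_directory_handle().expect("serve outgoing directory");
// fs.for_each_concurrent(None, |stream| serve(timer_loop.clone(), stream)).await;
// ```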
282
// Inject a constant KOID as the connection ID (cid) if the alarm ID corresponds to the
// singleton Starnix alarm.
285// TODO(b/383062441): remove this special casing.
286fn compute_cid(cid: zx::Koid, alarm_id: &str) -> zx::Koid {
287    if alarm_id == TEMPORARY_STARNIX_TIMER_ID {
288        // Temporarily, the Starnix timer is a singleton and always gets the
289        // same CID.
290        TEMPORARY_STARNIX_CID.as_handle_ref().get_koid().expect("infallible")
291    } else {
292        cid
293    }
294}
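
// Illustration of the special-casing above (a sketch; `koid_a` and `koid_b`
// stand for the KOIDs of two distinct client connections):
//
// ```ignore
// // Any connection scheduling the Starnix alarm collapses onto one shared CID.
// let cid_a = compute_cid(koid_a, TEMPORARY_STARNIX_TIMER_ID);
// let cid_b = compute_cid(koid_b, TEMPORARY_STARNIX_TIMER_ID);
// assert_eq!(cid_a, cid_b);
// // Any other alarm ID keeps the CID of its own connection.
// assert_eq!(compute_cid(koid_a, "some-other-alarm"), koid_a);
// ```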
295
296async fn handle_cancel(alarm_id: String, cid: zx::Koid, cmd: &mut mpsc::Sender<Cmd>) {
297    let done = zx::Event::create();
298    let cid = compute_cid(cid, &alarm_id);
299    let timer_id = TimerId { alarm_id: alarm_id.clone(), cid };
300    if let Err(e) = cmd.send(Cmd::StopById { timer_id, done: clone_handle(&done) }).await {
301        warn!("handle_request: error while trying to cancel: {}: {:?}", alarm_id, e);
302    }
303    wait_signaled(&done).await;
304}
305
306/// Processes a single Wake API request from a single client.
307/// This function is expected to return quickly.
308///
309/// # Args
310/// - `cid`: the unique identifier of the connection producing these requests.
311/// - `cmd`: the outbound queue of commands to deliver to the timer manager.
312/// - `request`: a single inbound Wake FIDL API request.
313async fn handle_request(cid: zx::Koid, mut cmd: mpsc::Sender<Cmd>, request: fta::WakeRequest) {
314    match request {
315        fta::WakeRequest::SetAndWait { deadline, setup_done, alarm_id, responder } => {
316            // Since responder is consumed by the happy path and the error path, but not both,
317            // and because the responder does not implement Default, this is a way to
318            // send it in two mutually exclusive directions.  Each direction will reverse
319            // this wrapping once the responder makes it to the other side.
320            //
321            // Rc required because of sharing a noncopyable struct; RefCell required because
322            // borrow_mut() is needed to move out; and Option is required so we can
323            // use take() to replace the struct with None so it does not need to leave
324            // a Default in its place.
325            let responder = Rc::new(RefCell::new(Some(responder)));
326            let cid = compute_cid(cid, &alarm_id);
327
328            // Alarm is not scheduled yet!
329            debug!(
330                "handle_request: scheduling alarm_id: \"{}\"\n\tcid: {:?}\n\tdeadline: {}",
331                alarm_id,
332                cid,
333                format_timer(deadline.into())
334            );
335            // Expected to return quickly.
336            if let Err(e) = cmd
337                .send(Cmd::Start {
338                    cid,
339                    deadline: deadline.into(),
340                    setup_done,
341                    alarm_id: alarm_id.clone(),
342                    responder: responder.clone(),
343                })
344                .await
345            {
346                warn!("handle_request: error while trying to schedule `{}`: {:?}", alarm_id, e);
347                responder
348                    .borrow_mut()
349                    .take()
350                    .expect("always present if call fails")
351                    .send(Err(fta::WakeError::Internal))
352                    .unwrap();
353            }
354        }
355        fta::WakeRequest::Cancel { alarm_id, .. } => {
356            // TODO: b/383062441 - make this into an async task so that we wait
357            // less to schedule the next alarm.
358            handle_cancel(alarm_id, cid, &mut cmd).await;
359        }
360        // Similar to above, but wait for the cancel to complete.
361        fta::WakeRequest::CancelSync { alarm_id, responder, .. } => {
362            handle_cancel(alarm_id, cid, &mut cmd).await;
363            responder.send(Ok(())).expect("infallible");
364        }
365        fta::WakeRequest::GetProperties { responder, .. } => {
366            let response =
367                fta::WakeGetPropertiesResponse { is_supported: Some(true), ..Default::default() };
368            debug!("sending: Wake.GetProperties: {:?}", &response);
369            responder.send(&response).expect("send success");
370        }
371        fta::WakeRequest::_UnknownMethod { .. } => {}
372    };
373}
374
375/// Represents a single alarm event processing loop.
376///
/// One instance is created for each alarm-capable low-level device.
378pub struct Loop {
379    // The task executing the alarm event processing [Loop].
380    _task: fasync::Task<()>,
381    // Given to any clients that need to send messages to `_task`
382    // via [get_sender].
383    snd_cloneable: mpsc::Sender<Cmd>,
384}
385
386impl Loop {
387    /// Creates a new instance of [Loop].
388    ///
389    /// `device_proxy` is a connection to a low-level timer device.
390    pub fn new(device_proxy: ffhh::DeviceProxy, inspect: finspect::Node) -> Self {
391        let (snd, rcv) = mpsc::channel(CHANNEL_SIZE);
392        let snd_clone = snd.clone();
393        let _task = fasync::Task::local(async move {
394            wake_timer_loop(snd_clone, rcv, device_proxy, inspect).await
395        });
396        Self { _task, snd_cloneable: snd }
397    }
398
399    /// Gets a copy of a channel through which async commands may be sent to
400    /// the [Loop].
401    fn get_sender(&self) -> mpsc::Sender<Cmd> {
402        self.snd_cloneable.clone()
403    }
404}
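
// A sketch of wiring a [Loop] into a component, assuming a connection to the
// hrtimer device and an inspect node are already available; the names below
// are illustrative rather than prescriptive:
//
// ```ignore
// let device = client::connect_to_protocol::<ffhh::DeviceMarker>()
//     .expect("connect to hrtimer device");
// let inspect = fuchsia_inspect::component::inspector().root().create_child("alarms");
// let timer_loop = Rc::new(Loop::new(device, inspect));
// // Then, for each incoming fuchsia.time.alarms/Wake connection:
// // serve(Rc::clone(&timer_loop), stream).await;
// ```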
405
406/// A representation of the state of a single Timer.
407#[derive(Debug)]
408struct TimerNode {
409    /// The deadline at which the timer expires.
410    deadline: fasync::BootInstant,
411    /// The unique alarm ID associated with this timer.
412    alarm_id: String,
413    /// The unique connection ID that this timer belongs to.  Multiple timers
414    /// may share the same `cid`.
415    cid: zx::Koid,
416    /// The responder that is blocked until the timer expires.  Used to notify
417    /// the alarms subsystem client when this alarm expires.
418    responder: Option<fta::WakeSetAndWaitResponder>,
419}
420
421impl TimerNode {
422    fn new(
423        deadline: fasync::BootInstant,
424        alarm_id: String,
425        cid: zx::Koid,
426        responder: fta::WakeSetAndWaitResponder,
427    ) -> Self {
428        Self { deadline, alarm_id, cid, responder: Some(responder) }
429    }
430
431    fn get_alarm_id(&self) -> &str {
432        &self.alarm_id[..]
433    }
434
435    fn get_cid(&self) -> &zx::Koid {
436        &self.cid
437    }
438
439    fn get_id(&self) -> TimerId {
440        TimerId { alarm_id: self.alarm_id.clone(), cid: self.cid.clone() }
441    }
442
443    fn get_deadline(&self) -> &fasync::BootInstant {
444        &self.deadline
445    }
446
447    fn take_responder(&mut self) -> Option<fta::WakeSetAndWaitResponder> {
448        self.responder.take()
449    }
450}
451
452impl Drop for TimerNode {
453    // If the TimerNode was evicted without having expired, notify the other
454    // end that the timer has been canceled.
455    fn drop(&mut self) {
456        let responder = self.take_responder();
457        responder.map(|r| {
            // If the TimerNode is dropped, notify the client that may have
            // been waiting. We cannot simply drop the responder, because that
            // closes the FIDL connection.
461            r.send(Err(fta::WakeError::Dropped))
462                .map_err(|e| error!("could not drop responder: {:?}", e))
463        });
464    }
465}
466
/// This and the other comparison trait implementations below are needed to
/// establish a total ordering of [TimerNode]s.
469impl std::cmp::Eq for TimerNode {}
470
471impl std::cmp::PartialEq for TimerNode {
472    fn eq(&self, other: &Self) -> bool {
473        self.deadline == other.deadline && self.alarm_id == other.alarm_id && self.cid == other.cid
474    }
475}
476
477impl std::cmp::PartialOrd for TimerNode {
478    /// Order by deadline first, but timers with same deadline are ordered
479    /// by respective IDs to avoid ordering nondeterminism.
480    fn partial_cmp(&self, other: &Self) -> Option<cmp::Ordering> {
481        Some(self.cmp(other))
482    }
483}
484
485impl Ord for TimerNode {
486    /// Compares two [TimerNode]s, by "which is sooner".
487    ///
488    /// Ties are broken by alarm ID, then by connection ID.
489    fn cmp(&self, other: &Self) -> std::cmp::Ordering {
490        let ordering = other.deadline.cmp(&self.deadline);
491        if ordering == std::cmp::Ordering::Equal {
            let ordering = self.alarm_id.cmp(&other.alarm_id);
493            if ordering == std::cmp::Ordering::Equal {
494                self.cid.cmp(&other.cid)
495            } else {
496                ordering
497            }
498        } else {
499            ordering
500        }
501    }
502}
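
// Because the deadline comparison above is reversed, the max-heap
// `BinaryHeap<TimerNode>` used by [Timers] behaves as a min-heap by deadline:
// `peek()` yields the soonest timer.  A sketch, with `timer_node(deadline)`
// standing in for construction of a [TimerNode] (responder elided):
//
// ```ignore
// let mut heap = BinaryHeap::new();
// heap.push(timer_node(fasync::BootInstant::from_nanos(200)));
// heap.push(timer_node(fasync::BootInstant::from_nanos(100)));
// // The node with the earliest deadline is at the top of the heap.
// assert_eq!(heap.peek().unwrap().get_deadline().into_nanos(), 100);
// ```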
503
504/// A full timer identifier.
505#[derive(Debug, PartialEq, Eq, Hash)]
506struct TimerId {
507    /// Connection-unique alarm ID.
508    alarm_id: String,
509    /// Connection identifier, unique per each client connection.
510    cid: zx::Koid,
511}
512
513impl std::fmt::Display for TimerId {
514    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
515        write!(f, "TimerId[alarm_id:{},cid:{:?}]", self.alarm_id, self.cid)
516    }
517}
518
519/// Contains all the timers known by the alarms subsystem.
520///
521/// [Timers] can efficiently find a timer with the earliest deadline,
522/// and given a cutoff can expire one timer for which the deadline has
523/// passed.
524struct Timers {
525    timers: BinaryHeap<TimerNode>,
526    deadline_by_id: HashMap<TimerId, fasync::BootInstant>,
527}
528
529impl std::fmt::Display for Timers {
530    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
531        let now = fasync::BootInstant::now();
532        let sorted = self
533            .timers
534            .iter()
535            .map(|n| (n.deadline, n.alarm_id.clone()))
536            .collect::<BTreeMap<_, _>>()
537            .into_iter()
538            .map(|(k, v)| {
539                let remaining = k - now;
540                format!(
541                    "Timeout: {} => timer_id: {}, remaining: {}",
542                    format_timer(k.into()),
543                    v,
544                    format_duration(remaining.into())
545                )
546            })
547            .collect::<Vec<_>>();
548        let joined = sorted.join("\n\t");
549        write!(f, "\n\t{}", joined)
550    }
551}
552
553impl Timers {
    /// Creates an empty [Timers].
555    fn new() -> Self {
556        Self { timers: BinaryHeap::new(), deadline_by_id: HashMap::new() }
557    }
558
559    /// Adds a [TimerNode] to [Timers].
560    ///
561    /// If the inserted node is identical to an already existing node, then
562    /// nothing is changed.  If the deadline is different, then the timer node
563    /// is replaced.
564    fn push(&mut self, n: TimerNode) {
565        let new_id = n.get_id();
566        if let Some(deadline) = self.deadline_by_id.get(&new_id) {
567            // There already is a deadline for this timer.
568            if n.deadline == *deadline {
569                return;
570            }
571            // Else replace. The deadline may be pushed out or pulled in.
572            self.deadline_by_id.insert(new_id, n.deadline.clone());
573            self.timers.retain(|t| t.get_id() != n.get_id());
574            self.timers.push(n);
575        } else {
576            // New timer node.
577            self.deadline_by_id.insert(new_id, n.deadline);
578            self.timers.push(n);
579        }
580    }
581
582    /// Returns a reference to the stored timer with the earliest deadline.
583    fn peek(&self) -> Option<&TimerNode> {
584        self.timers.peek()
585    }
586
587    /// Returns the deadline of the proximate timer in [Timers].
588    fn peek_deadline(&self) -> Option<fasync::BootInstant> {
589        self.peek().map(|t| t.deadline)
590    }
591
592    fn peek_id(&self) -> Option<TimerId> {
593        self.peek().map(|t| TimerId { alarm_id: t.alarm_id.clone(), cid: t.cid })
594    }
595
596    /// Args:
597    /// - `now` is the current time.
598    /// - `deadline` is the timer deadline to check for expiry.
599    fn expired(now: fasync::BootInstant, deadline: fasync::BootInstant) -> bool {
600        deadline <= now
601    }
602
603    /// Returns true if there are no known timers.
604    fn is_empty(&self) -> bool {
605        let empty1 = self.timers.is_empty();
606        let empty2 = self.deadline_by_id.is_empty();
607        assert!(empty1 == empty2, "broken invariant: empty1: {} empty2:{}", empty1, empty2);
608        empty1
609    }
610
611    /// Attempts to expire the earliest timer.
612    ///
    /// If a timer is expired, it is removed from [Timers] and returned to the caller. Note that
    /// there may be more timers that need expiring at the provided `now`. To drain
    /// [Timers] of all expired timers, repeat the call to this method with the same
    /// value of `now` until it returns `None`.
617    ///
618    /// Args:
619    /// - `now`: the time instant to compare the stored timers against.  Timers for
620    ///   which the deadline has been reached or surpassed are eligible for expiry.
621    fn maybe_expire_earliest(&mut self, now: fasync::BootInstant) -> Option<TimerNode> {
622        self.peek_deadline()
623            .map(|d| {
624                if Timers::expired(now, d) {
625                    self.timers.pop().map(|e| {
626                        self.deadline_by_id.remove(&e.get_id());
627                        e
628                    })
629                } else {
630                    None
631                }
632            })
633            .flatten()
634    }
635
636    /// Removes an alarm by ID.  If the earliest alarm is the alarm to be removed,
637    /// it is returned.
638    fn remove_by_id(&mut self, timer_id: &TimerId) -> Option<TimerNode> {
639        let ret = if let Some(t) = self.peek_id() {
640            if t == *timer_id {
641                self.timers.pop()
642            } else {
643                None
644            }
645        } else {
646            None
647        };
648
649        self.timers.retain(|t| t.alarm_id != timer_id.alarm_id || t.cid != timer_id.cid);
650        self.deadline_by_id.remove(timer_id);
651        ret
652    }
653
654    /// Returns the number of currently pending timers.
655    fn timer_count(&self) -> usize {
656        let count1 = self.timers.len();
657        let count2 = self.deadline_by_id.len();
658        assert!(count1 == count2, "broken invariant: count1: {}, count2: {}", count1, count2);
659        count1
660    }
661}
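
// A sketch of the [Timers] life cycle, with `node(alarm_id, cid, deadline)`
// and `instant(nanos)` standing in for construction helpers that are not part
// of this module:
//
// ```ignore
// let mut timers = Timers::new();
// timers.push(node("a", cid, instant(100)));
// // Same (alarm_id, cid) with a new deadline replaces the existing entry.
// timers.push(node("a", cid, instant(50)));
// assert_eq!(timers.timer_count(), 1);
// assert_eq!(timers.peek_deadline(), Some(instant(50)));
// // Nothing expires before the deadline is reached...
// assert!(timers.maybe_expire_earliest(instant(49)).is_none());
// // ...and the single eligible timer expires once it is.
// assert!(timers.maybe_expire_earliest(instant(50)).is_some());
// ```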
662
663// Clones a handle. Needed for 1:N notifications.
664fn clone_handle<H: HandleBased>(handle: &H) -> H {
665    handle.duplicate_handle(zx::Rights::SAME_RIGHTS).expect("infallible")
666}
667
668async fn wait_signaled<H: HandleBased>(handle: &H) {
669    fasync::OnSignals::new(handle, zx::Signals::EVENT_SIGNALED).await.expect("infallible");
670}
671
672fn signal<H: HandleBased>(handle: &H) {
673    handle.signal_handle(zx::Signals::NONE, zx::Signals::EVENT_SIGNALED).expect("infallible");
674}
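
// These helpers implement the `done`/`setup_done` handshake used throughout
// this module: one side creates a `zx::Event`, hands a duplicate to the other
// side along with the command, and waits; the processing side signals the
// event once the work is complete.  A sketch:
//
// ```ignore
// let done = zx::Event::create();
// let done_clone = clone_handle(&done);
// fasync::Task::local(async move {
//     // ... perform the requested work ...
//     signal(&done_clone);
// })
// .detach();
// wait_signaled(&done).await;
// ```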
675
676/// A [TimerDuration] represents a duration of time that can be expressed by
677/// a discrete timer register.
678///
679/// This is a low-level representation of time duration, used in interaction with
680/// hardware devices. It is therefore necessarily discretized, with adaptive
681/// resolution, depending on the physical characteristics of the underlying
682/// hardware timer that it models.
683#[derive(Debug, Clone, Copy)]
684struct TimerDuration {
685    // The resolution of each one of the `ticks` below.
686    resolution: zx::BootDuration,
687    // The number of ticks that encodes time duration. Each "tick" represents
688    // one unit of `resolution` above.
689    ticks: u64,
690}
691
692/// This and the comparison traits below are used to allow TimerDuration
693/// calculations in a compact form.
694impl Eq for TimerDuration {}
695
696impl std::cmp::PartialOrd for TimerDuration {
697    fn partial_cmp(&self, other: &TimerDuration) -> Option<std::cmp::Ordering> {
698        Some(self.cmp(other))
699    }
700}
701
702impl std::cmp::PartialEq for TimerDuration {
703    fn eq(&self, other: &Self) -> bool {
704        self.cmp(other) == std::cmp::Ordering::Equal
705    }
706}
707
708impl std::cmp::Ord for TimerDuration {
709    /// Two [TimerDuration]s compare equal if they model exactly the same duration of time,
710    /// no matter the resolutions.
711    fn cmp(&self, other: &TimerDuration) -> std::cmp::Ordering {
712        let self_nanos = self.resolution_as_nanos() * self.ticks;
713        let other_nanos = other.resolution_as_nanos() * other.ticks;
714        self_nanos.cmp(&other_nanos)
715    }
716}
717
718impl std::fmt::Display for TimerDuration {
719    /// Human readable TimerDuration exposes both the tick count and the resolution,
720    /// in the format of "ticks x resolution", with an end result of
721    /// `10x5ms` for example.
722    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
723        let ticks = self.ticks;
724        let resolution = self.resolution();
725        // Example: 10x1ms
726        write!(f, "{}x{}", ticks, format_duration(resolution),)
727    }
728}
729
730impl TimerDuration {
731    /// The maximum representable TimerDuration that we allow.
732    fn max() -> Self {
733        TimerDuration::new(zx::BootDuration::from_nanos(1), *I64_MAX_AS_U64)
734    }
735
736    /// The zero [TimerDuration].
737    fn zero() -> Self {
738        TimerDuration::new(zx::BootDuration::from_nanos(1), 0)
739    }
740
741    /// Creates a new timer duration with the given parameters.
742    fn new(resolution: zx::BootDuration, ticks: u64) -> Self {
743        Self { resolution, ticks }
744    }
745
746    /// Creates a new timer duration using the resolution from `res_source` and
747    /// a specified number of ticks.
748    fn new_with_resolution(res_source: &TimerDuration, ticks: u64) -> Self {
749        Self::new(res_source.resolution, ticks)
750    }
751
752    /// Returns the time duration represented by this TimerDuration.
753    ///
754    /// Due to the way duration is expressed, the same time duration
755    /// can be represented in multiple ways.
756    fn duration(&self) -> zx::BootDuration {
757        let duration_as_nanos = self.resolution_as_nanos() * self.ticks;
758        let clamp_duration = std::cmp::min(*I32_MAX_AS_U64, duration_as_nanos);
759        zx::BootDuration::from_nanos(clamp_duration.try_into().expect("result was clamped"))
760    }
761
762    /// The resolution of this TimerDuration
763    fn resolution(&self) -> zx::BootDuration {
764        self.resolution
765    }
766
767    fn resolution_as_nanos(&self) -> u64 {
768        self.resolution().into_nanos().try_into().expect("resolution is never negative")
769    }
770
771    /// The number of ticks of this [TimerDuration].
772    fn ticks(&self) -> u64 {
773        self.ticks
774    }
775}
776
777impl From<zx::BootDuration> for TimerDuration {
778    fn from(d: zx::BootDuration) -> TimerDuration {
779        let nanos = d.into_nanos();
780        assert!(nanos >= 0);
781        let nanos_u64 = nanos.try_into().expect("guarded by assert");
782        TimerDuration::new(zx::BootDuration::from_nanos(1), nanos_u64)
783    }
784}
785
786impl std::ops::Div for TimerDuration {
787    type Output = u64;
788    fn div(self, rhs: Self) -> Self::Output {
789        let self_nanos = self.resolution_as_nanos() * self.ticks;
790        let rhs_nanos = rhs.resolution_as_nanos() * rhs.ticks;
791        self_nanos / rhs_nanos
792    }
793}
794
795impl std::ops::Mul<u64> for TimerDuration {
796    type Output = Self;
797    fn mul(self, rhs: u64) -> Self::Output {
798        Self::new(self.resolution, self.ticks * rhs)
799    }
800}
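
// TimerDuration arithmetic in brief (a sketch; the values follow directly from
// the definitions above): division yields how many times `rhs` fits into
// `self`, and comparisons ignore the resolution used to express a duration.
//
// ```ignore
// let unit = TimerDuration::new(zx::BootDuration::from_millis(1), 1); // 1x1ms
// let span: TimerDuration = zx::BootDuration::from_millis(10).into(); // 10000000x1ns
// assert_eq!(span / unit, 10);
// assert_eq!(unit * 10, span); // 10x1ms == 10000000x1ns
// ```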
801
802/// Contains the configuration of a specific timer.
803#[derive(Debug)]
804struct TimerConfig {
    /// The resolutions supported by this timer. Each entry is one possible
    /// duration of one timer "tick".  The resolution is picked when a timer
    /// request is sent.
808    resolutions: Vec<zx::BootDuration>,
809    /// The maximum count of "ticks" that the timer supports. The timer usually
810    /// has a register that counts up or down based on a clock signal with
811    /// the period specified by `resolutions`.  This is the maximum value that
812    /// the counter can count to without overflowing.
813    max_ticks: u64,
814    /// The stable ID of the timer with the above configuration.
815    id: u64,
816}
817
818impl TimerConfig {
819    /// Creates a new timer config with supported timer resolutions and the max
820    /// ticks value for the timer's counter.
821    fn new_from_data(timer_id: u64, resolutions: &[zx::BootDuration], max_ticks: u64) -> Self {
822        debug!(
823            "TimerConfig: resolutions: {:?}, max_ticks: {}, timer_id: {}",
824            resolutions.iter().map(|r| format_duration(*r)).collect::<Vec<_>>(),
825            max_ticks,
826            timer_id
827        );
828        let resolutions = resolutions.iter().map(|d| *d).collect::<Vec<zx::BootDuration>>();
829        TimerConfig { resolutions, max_ticks, id: timer_id }
830    }
831
832    fn new_empty() -> Self {
833        error!("TimerConfig::new_empty() called, this is not OK.");
834        TimerConfig { resolutions: vec![], max_ticks: 0, id: 0 }
835    }
836
    // Picks the most appropriate timer setting so that the timer fires as close
    // as possible to the moment `duration` expires.
839    //
840    // If duration is too far in the future for what the timer supports,
841    // return a smaller value, to allow the timer to be reprogrammed multiple
842    // times.
843    //
844    // If the available menu of resolutions is such that we can wake only after
845    // the intended deadline, begrudgingly return that option.
846    fn pick_setting(&self, duration: zx::BootDuration) -> TimerDuration {
847        //  0         |-------------->|<---------------|
848        //  |---------+---------------+----------------+---->
849        //  |---------^               |                |
850        //  | best positive slack     |                |
851        //  |-------------------------^ duration       |
852        //  |------------------------------------------^ best negative slack.
853        let mut best_positive_slack = TimerDuration::zero();
854        let mut best_negative_slack = TimerDuration::max();
855
856        if self.max_ticks == 0 {
857            return TimerDuration::new(zx::BootDuration::from_millis(1), 0);
858        }
859        let duration_slack: TimerDuration = duration.into();
860
861        for res1 in self.resolutions.iter() {
862            let smallest_unit = TimerDuration::new(*res1, 1);
863            let max_tick_at_res = TimerDuration::new(*res1, self.max_ticks);
864
865            let smallest_slack_larger_than_duration = smallest_unit > duration_slack;
866            let largest_slack_smaller_than_duration = max_tick_at_res < duration_slack;
867
868            if smallest_slack_larger_than_duration {
869                if smallest_unit < best_negative_slack {
870                    best_negative_slack = smallest_unit;
871                }
872            }
873            if largest_slack_smaller_than_duration {
874                if max_tick_at_res > best_positive_slack
875                    || best_positive_slack == TimerDuration::zero()
876                {
877                    best_positive_slack = max_tick_at_res;
878                }
879            }
880
881            // "Regular" case.
882            if !smallest_slack_larger_than_duration && !largest_slack_smaller_than_duration {
883                // Check whether duration divides evenly into the available slack options
884                // for this resolution.  If it does, then that is the slack we're looking for.
885                let q = duration_slack / smallest_unit;
886                let d = smallest_unit * q;
887                if d == duration_slack {
888                    // Exact match, we can return right now.
889                    return d;
890                } else {
891                    // Not an exact match, so q ticks is before, but q+1 is after.
892                    if d > best_positive_slack {
893                        best_positive_slack = TimerDuration::new_with_resolution(&smallest_unit, q);
894                    }
895                    let d_plus = TimerDuration::new_with_resolution(&smallest_unit, q + 1);
896                    if d_plus < best_negative_slack {
897                        best_negative_slack = d_plus;
898                    }
899                }
900            }
901        }
902
903        let p_slack = duration - best_positive_slack.duration();
904        let n_slack = best_negative_slack.duration() - duration;
905
906        // If the closest approximation is 0ns, then we can not advance time, so we reject it.
907        // Otherwise pick the smallest slack.  Note that when we pick the best positive slack,
908        // we will wake *before* the actual deadline.  In multi-resolution counters, this enables
909        // us to pick a finer count in the next go.
910        let ret = if p_slack < n_slack && best_positive_slack.duration().into_nanos() > 0 {
911            best_positive_slack
912        } else {
913            best_negative_slack
914        };
915        debug!("TimerConfig: picked slack: {} for duration: {}", ret, format_duration(duration));
916        assert!(
917            ret.duration().into_nanos() > 0,
918            "ret: {}, p_slack: {}, n_slack: {}, orig.duration: {}\n\tbest_p_slack: {}\n\tbest_n_slack: {}\n\ttarget: {}\n\t 1: {} 2: {:?}, 3: {:?}",
919            ret,
920            format_duration(p_slack),
921            format_duration(n_slack),
922            format_duration(duration),
923            best_positive_slack,
924            best_negative_slack,
925            duration_slack,
926            p_slack != zx::BootDuration::ZERO,
927            p_slack,
928            zx::BootDuration::ZERO,
929        );
930        ret
931    }
932}
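
// A sketch of `pick_setting` for a single-resolution timer (1ms resolution,
// max_ticks = 100); the outcomes below follow from the algorithm above:
//
// ```ignore
// let config = TimerConfig::new_from_data(
//     MAIN_TIMER_ID as u64,
//     &[zx::BootDuration::from_millis(1)],
//     100,
// );
// // An exact fit: 25ms is programmed as 25 ticks of 1ms.
// assert_eq!(config.pick_setting(zx::BootDuration::from_millis(25)).ticks(), 25);
// // A deadline beyond the timer's range: program the maximum (100 ticks) and
// // let the wake loop re-program the timer after it fires early.
// assert_eq!(config.pick_setting(zx::BootDuration::from_millis(250)).ticks(), 100);
// ```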
933
934async fn get_timer_properties(hrtimer: &ffhh::DeviceProxy) -> TimerConfig {
935    debug!("get_timer_properties: requesting timer properties.");
936    match hrtimer.get_properties().await {
937        Ok(p) => {
938            let timers_properties = &p.timers_properties.expect("timers_properties must exist");
939            debug!("get_timer_properties: got: {:?}", timers_properties);
940
941            // Pick the correct hrtimer to use for wakes.
942            let timer_index = if timers_properties.len() > MAIN_TIMER_ID {
943                // Mostly vim3, where we have pre-existing timer allocations
944                // that we don't need to change.
945                MAIN_TIMER_ID
946            } else if timers_properties.len() > 0 {
947                // Newer devices that don't need to allocate timer IDs, and/or
948                // may not even have as many timers as vim3 does. But, at least
949                // one timer is needed.
950                0
951            } else {
952                // Give up.
953                return TimerConfig::new_empty();
954            };
955            let main_timer_properties = &timers_properties[timer_index];
956            debug!("alarms: main_timer_properties: {:?}", main_timer_properties);
957            // Not sure whether it is useful to have more ticks than this, so limit it.
958            let max_ticks: u64 = std::cmp::min(
959                main_timer_properties.max_ticks.unwrap_or(*MAX_USEFUL_TICKS),
960                *MAX_USEFUL_TICKS,
961            );
962            let resolutions = &main_timer_properties
963                .supported_resolutions
964                .as_ref()
965                .expect("supported_resolutions is populated")
966                .iter()
967                .last() //  Limits the resolution to the coarsest available.
968                .map(|r| match *r {
969                    ffhh::Resolution::Duration(d) => d,
970                    _ => {
971                        error!(
972                            "get_timer_properties: Unknown resolution type, returning millisecond."
973                        );
974                        MSEC_IN_NANOS
975                    }
976                })
977                .map(|d| zx::BootDuration::from_nanos(d))
978                .into_iter() // Used with .last() above.
979                .collect::<Vec<_>>();
980            let timer_id = main_timer_properties.id.expect("timer ID is always present");
981            TimerConfig::new_from_data(timer_id, resolutions, max_ticks)
982        }
983        Err(e) => {
984            error!("could not get timer properties: {:?}", e);
985            TimerConfig::new_empty()
986        }
987    }
988}
989
990/// The state of a single hardware timer that we must bookkeep.
991struct TimerState {
992    // The task waiting for the proximate timer to expire.
993    task: fasync::Task<()>,
994    // The deadline that the above task is waiting for.
995    deadline: fasync::BootInstant,
996}
997
/// The command loop for timer interaction.  All changes to the wake alarm device programming
/// come in the form of commands through `cmds`.
///
/// Args:
/// - `snd`: the send end of `cmds` below; a clone is given to each spawned sub-task.
/// - `cmds`: the input queue of alarm-related commands.
1004/// - `timer_proxy`: the FIDL API proxy for interacting with the hardware device.
1005/// - `inspect`: the inspect node to record loop info into.
1006async fn wake_timer_loop(
1007    snd: mpsc::Sender<Cmd>,
1008    mut cmds: mpsc::Receiver<Cmd>,
1009    timer_proxy: ffhh::DeviceProxy,
1010    inspect: finspect::Node,
1011) {
1012    debug!("wake_timer_loop: started");
1013
1014    let mut timers = Timers::new();
1015    let timer_config = get_timer_properties(&timer_proxy).await;
1016
1017    // Keeps the currently executing HrTimer closure.  This is not read from, but
1018    // keeps the timer task active.
1019    #[allow(clippy::collection_is_never_read)]
1020    let mut hrtimer_status: Option<TimerState> = None;
1021
1022    // Initialize inspect properties. This must be done only once.
1023    //
1024    // Take note that these properties are updated when the `cmds` loop runs.
1025    // This means that repeated reads while no `cmds` activity occurs will return
1026    // old readings.  This is to ensure a consistent ability to replay the last
1027    // loop run if needed.
1028    let now_prop = inspect.create_int("now_ns", 0);
1029    let now_formatted_prop = inspect.create_string("now_formatted", "");
1030    let pending_timers_count_prop = inspect.create_uint("pending_timers_count", 0);
1031    let pending_timers_prop = inspect.create_string("pending_timers", "");
1032    let deadline_histogram_prop = inspect.create_int_exponential_histogram(
1033        "requested_deadlines_ns",
1034        finspect::ExponentialHistogramParams {
1035            floor: 0,
1036            initial_step: zx::BootDuration::from_micros(1).into_nanos(),
1037            // Allows capturing deadlines up to dozens of days.
1038            step_multiplier: 10,
1039            buckets: 16,
1040        },
1041    );
1042    // Internals of what was programmed into the wake alarms hardware.
1043    let hw_node = inspect.create_child("hardware");
1044    let current_hw_deadline_prop = hw_node.create_string("current_deadline", "");
1045    let remaining_until_alarm_prop = hw_node.create_string("remaining_until_alarm", "");
1046
1047    while let Some(cmd) = cmds.next().await {
1048        trace::duration!(c"alarms", c"Cmd");
1049        // Use a consistent notion of "now" across commands.
1050        let now = fasync::BootInstant::now();
1051        now_prop.set(now.into_nanos());
1052        trace::instant!(c"alarms", c"wake_timer_loop", trace::Scope::Process, "now" => now.into_nanos());
1053        match cmd {
1054            Cmd::Start { cid, deadline, setup_done, alarm_id, responder } => {
1055                trace::duration!(c"alarms", c"Cmd::Start");
1056                let responder = responder.borrow_mut().take().expect("responder is always present");
1057                // NOTE: hold keep_alive until all work is done.
1058                debug!(
1059                    "wake_timer_loop: START alarm_id: \"{}\", cid: {:?}\n\tdeadline: {}\n\tnow:      {}",
1060                    alarm_id,
1061                    cid,
1062                    format_timer(deadline.into()),
1063                    format_timer(now.into()),
1064                );
1065                defer! {
1066                    // Must signal once the setup is completed.
1067                    signal(&setup_done);
1068                    debug!("wake_timer_loop: START: setup_done signaled");
1069                };
1070                deadline_histogram_prop.insert((deadline - now).into_nanos());
1071                if Timers::expired(now, deadline) {
1072                    trace::duration!(c"alarms", c"Cmd::Start:immediate");
                    // A timer set for now or for a past instant expires right away.
1074                    let (_lease, keep_alive) = zx::EventPair::create();
1075                    debug!(
1076                        "[{}] wake_timer_loop: bogus lease {:?}",
1077                        line!(),
1078                        &keep_alive.get_koid().unwrap()
1079                    );
1080                    responder
1081                        .send(Ok(keep_alive))
1082                        .map(|_| {
1083                            debug!(
1084                                concat!(
1085                                    "wake_timer_loop: cid: {:?}, alarm: {}: EXPIRED IMMEDIATELY\n\t",
1086                                    "deadline({}) <= now({})"
1087                                ),
1088                                cid,
1089                                alarm_id,
1090                                format_timer(deadline.into()),
1091                                format_timer(now.into())
1092                            )
1093                        })
1094                        .map_err(|e| {
1095                            error!(
1096                            "wake_timer_loop: cid: {:?}, alarm: {}: could not notify, dropping: {}",
1097                                cid, alarm_id, e)
1098                        })
1099                        .unwrap_or(());
1100                } else {
1101                    trace::duration!(c"alarms", c"Cmd::Start:regular");
1102                    // A timer scheduled for the future gets inserted into the timer heap.
1103                    let was_empty = timers.is_empty();
1104
1105                    let deadline_before = timers.peek_deadline();
1106                    timers.push(TimerNode::new(deadline, alarm_id, cid, responder));
1107                    let deadline_after = timers.peek_deadline();
1108
1109                    let deadline_changed = is_deadline_changed(deadline_before, deadline_after);
1110                    let needs_cancel = !was_empty && deadline_changed;
1111                    let needs_reschedule = was_empty || deadline_changed;
1112
1113                    if needs_reschedule {
1114                        // Always schedule the proximate deadline.
1115                        let schedulable_deadline = deadline_after.unwrap_or(deadline);
1116                        if needs_cancel {
1117                            stop_hrtimer(&timer_proxy, &timer_config).await;
1118                        }
1119                        hrtimer_status = Some(
1120                            schedule_hrtimer(
1121                                now,
1122                                &timer_proxy,
1123                                schedulable_deadline,
1124                                snd.clone(),
1125                                &timer_config,
1126                            )
1127                            .await,
1128                        );
1129                    }
1130                }
1131            }
1132            Cmd::StopById { timer_id, done } => {
1133                trace::duration!(c"alarms", c"Cmd::StopById", "alarm_id" => &timer_id.alarm_id[..]);
1134                debug!("wake_timer_loop: STOP timer: {}", timer_id);
1135                let deadline_before = timers.peek_deadline();
1136
1137                if let Some(mut timer_node) = timers.remove_by_id(&timer_id) {
1138                    let deadline_after = timers.peek_deadline();
1139
1140                    if let Some(responder) = timer_node.take_responder() {
1141                        // We must reply to the responder to keep the connection open.
1142                        responder.send(Err(fta::WakeError::Dropped)).expect("infallible");
1143                    }
1144                    if is_deadline_changed(deadline_before, deadline_after) {
1145                        stop_hrtimer(&timer_proxy, &timer_config).await;
1146                    }
1147                    if let Some(deadline) = deadline_after {
1148                        // Reschedule the hardware timer if the removed timer is the earliest one,
1149                        // and another one exists.
1150                        let new_timer_state = schedule_hrtimer(
1151                            now,
1152                            &timer_proxy,
1153                            deadline,
1154                            snd.clone(),
1155                            &timer_config,
1156                        )
1157                        .await;
1158                        let old_hrtimer_status = hrtimer_status.replace(new_timer_state);
1159                        if let Some(task) = old_hrtimer_status.map(|ev| ev.task) {
1160                            // Allow the task to complete, I suppose.
1161                            task.await;
1162                        }
1163                    } else {
1164                        // No next timer, clean up the hrtimer status.
1165                        hrtimer_status = None;
1166                    }
1167                } else {
1168                    debug!("wake_timer_loop: STOP: no active timer to stop: {}", timer_id);
1169                }
1170                signal(&done);
1171            }
1172            Cmd::Alarm { expired_deadline, keep_alive } => {
1173                trace::duration!(c"alarms", c"Cmd::Alarm");
                // Expire all eligible timers, based on "now".  This is because
                // we may have woken up earlier than the actual deadline. This
                // happens, for example, if the timer cannot reach the actual
                // deadline in one programming and needs to be re-programmed.
1178                debug!(
1179                    "wake_timer_loop: ALARM!!! reached deadline: {}, wakey-wakey! {:?}",
1180                    format_timer(expired_deadline.into()),
1181                    keep_alive.get_koid().unwrap(),
1182                );
1183                let expired_count =
1184                    notify_all(&mut timers, &keep_alive, now).expect("notification succeeds");
1185                if expired_count == 0 {
1186                    // This could be a resolution switch, or a straggler notification.
1187                    // Either way, the hardware timer is still ticking, cancel it.
1188                    debug!("wake_timer_loop: no expired alarms, reset hrtimer state");
1189                    stop_hrtimer(&timer_proxy, &timer_config).await;
1190                }
1191                // There is a timer to reschedule, do that now.
1192                hrtimer_status = match timers.peek_deadline() {
1193                    None => None,
1194                    Some(deadline) => Some(
1195                        schedule_hrtimer(now, &timer_proxy, deadline, snd.clone(), &timer_config)
1196                            .await,
1197                    ),
1198                }
1199            }
1200            Cmd::AlarmFidlError { expired_deadline, error } => {
1201                trace::duration!(c"alarms", c"Cmd::AlarmFidlError");
1202                // We do not have a wake lease, so the system may sleep before
1203                // we get to schedule a new timer. We have no way to avoid it
1204                // today.
1205                warn!(
1206                    "wake_timer_loop: FIDL error: {:?}, deadline: {}, now: {}",
1207                    error,
1208                    format_timer(expired_deadline.into()),
1209                    format_timer(now.into()),
1210                );
1211                // Manufacture a fake lease to make the code below work.
1212                // Maybe use Option instead?
1213                let (_dummy_lease, peer) = zx::EventPair::create();
1214                debug!("XXX: [{}] bogus lease: 1 {:?}", line!(), &peer.get_koid().unwrap());
1215                notify_all(&mut timers, &peer, now).expect("notification succeeds");
1216                hrtimer_status = match timers.peek_deadline() {
1217                    None => None, // No remaining timers, nothing to schedule.
1218                    Some(deadline) => Some(
1219                        schedule_hrtimer(now, &timer_proxy, deadline, snd.clone(), &timer_config)
1220                            .await,
1221                    ),
1222                }
1223            }
1224            Cmd::AlarmDriverError { expired_deadline, error } => {
1225                trace::duration!(c"alarms", c"Cmd::AlarmDriverError");
1226                let (_dummy_lease, peer) = zx::EventPair::create();
1227                debug!("XXX: [{}] bogus lease: {:?}", line!(), &peer.get_koid().unwrap());
1228                notify_all(&mut timers, &peer, now).expect("notification succeeds");
1229                match error {
1230                    fidl_fuchsia_hardware_hrtimer::DriverError::Canceled => {
1231                        // Nothing to do here, cancelation is handled in Stop code.
1232                        debug!(
1233                            "wake_timer_loop: CANCELED timer at deadline: {}",
1234                            format_timer(expired_deadline.into())
1235                        );
1236                    }
1237                    _ => {
1238                        error!(
1239                            "wake_timer_loop: DRIVER SAYS: {:?}, deadline: {}, now: {}",
1240                            error,
1241                            format_timer(expired_deadline.into()),
1242                            format_timer(now.into()),
1243                        );
1244                        // We do not have a wake lease, so the system may sleep before
1245                        // we get to schedule a new timer. We have no way to avoid it
1246                        // today.
1247                        hrtimer_status = match timers.peek_deadline() {
1248                            None => None,
1249                            Some(deadline) => Some(
1250                                schedule_hrtimer(
1251                                    now,
1252                                    &timer_proxy,
1253                                    deadline,
1254                                    snd.clone(),
1255                                    &timer_config,
1256                                )
1257                                .await,
1258                            ),
1259                        }
1260                    }
1261                }
1262            }
1263        }
1264
1265        {
1266            // Print and record diagnostics after each iteration, record the
1267            // duration for performance awareness.  Note that iterations happen
1268            // only occasionally, so these stats can remain unchanged for a long
1269            // time.
1270            trace::duration!(c"timekeeper", c"inspect");
1271            let now_formatted = format_timer(now.into());
1272            debug!("wake_timer_loop: now:                             {}", &now_formatted);
1273            now_formatted_prop.set(&now_formatted);
1274
1275            let pending_timers_count: u64 =
1276                timers.timer_count().try_into().expect("always convertible");
1277            debug!("wake_timer_loop: currently pending timer count:   {}", pending_timers_count);
1278            pending_timers_count_prop.set(pending_timers_count);
1279
1280            let pending_timers = format!("{}", timers);
1281            debug!("wake_timer_loop: currently pending timers:        {}", &timers);
1282            pending_timers_prop.set(&pending_timers);
1283
1284            let current_deadline: String = hrtimer_status
1285                .as_ref()
1286                .map(|s| format!("{}", format_timer(s.deadline.into())))
1287                .unwrap_or_else(|| "(none)".into());
1288            debug!("wake_timer_loop: current hardware timer deadline: {:?}", current_deadline);
1289            current_hw_deadline_prop.set(&current_deadline);
1290
1291            let remaining_duration_until_alarm = hrtimer_status
1292                .as_ref()
1293                .map(|s| format!("{}", format_duration((s.deadline - now).into())))
1294                .unwrap_or_else(|| "(none)".into());
1295            debug!(
1296                "wake_timer_loop: remaining duration until alarm:  {}",
1297                &remaining_duration_until_alarm
1298            );
1299            remaining_until_alarm_prop.set(&remaining_duration_until_alarm);
1300            debug!("---");
1301        }
1302    }
1303
1304    debug!("wake_timer_loop: exiting. This is unlikely in prod code.");
1305}
1306
1307/// Schedules a wake alarm.
1308///
1309/// Args:
1310/// - `now`: the time instant to treat as the current time.
1311/// - `hrtimer`: the proxy for the hrtimer device driver.
1312/// - `deadline`: the time instant in the future at which the alarm should fire.
1313/// - `command_send`: the sender channel to use when the timer expires.
1314/// - `timer_config`: a configuration of the hardware timer showing supported resolutions and
1315///   max tick value.
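///
/// # Example
///
/// An illustrative sketch of how the wake timer loop invokes this function.
/// `timer_proxy`, `snd`, and `timer_config` stand for values that the caller
/// already holds; the names are for illustration only.
///
/// ```ignore
/// let now = fasync::BootInstant::now();
/// let deadline = now + zx::BootDuration::from_millis(100);
/// // The returned TimerState owns the task that awaits the hardware timer.
/// let hrtimer_status = Some(
///     schedule_hrtimer(now, &timer_proxy, deadline, snd.clone(), &timer_config).await,
/// );
/// ```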
1317async fn schedule_hrtimer(
1318    now: fasync::BootInstant,
1319    hrtimer: &ffhh::DeviceProxy,
1320    deadline: fasync::BootInstant,
1321    mut command_send: mpsc::Sender<Cmd>,
1322    timer_config: &TimerConfig,
1323) -> TimerState {
1324    let timeout = deadline - now;
1325    trace::duration!(c"alarms", c"schedule_hrtimer", "timeout" => timeout.into_nanos());
1326    assert!(
1327        now < deadline,
1328        "now: {}, deadline: {}, diff: {}",
1329        format_timer(now.into()),
1330        format_timer(deadline.into()),
1331        format_duration(timeout),
1332    );
1333    // When signaled, the hrtimer has been scheduled.
1334    let hrtimer_scheduled = zx::Event::create();
1335
1336    debug!(
1337        "schedule_hrtimer:\n\tnow: {}\n\tdeadline: {}\n\ttimeout: {}",
1338        format_timer(now.into()),
1339        format_timer(deadline.into()),
1340        format_duration(timeout),
1341    );
1342
1343    let slack = timer_config.pick_setting(timeout);
1344
1345    let resolution_nanos = slack.resolution.into_nanos();
1346    let ticks = slack.ticks();
1347    trace::instant!(c"alarms", c"hrtimer:programmed",
1348        trace::Scope::Process,
1349        "resolution_ns" => resolution_nanos,
1350        "ticks" => ticks
1351    );
1352    let start_and_wait_fut = hrtimer.start_and_wait(
1353        timer_config.id,
1354        &ffhh::Resolution::Duration(resolution_nanos),
1355        ticks,
1356        clone_handle(&hrtimer_scheduled),
1357    );
1358    let hrtimer_task = fasync::Task::local(async move {
1359        debug!("hrtimer_task: waiting for hrtimer driver response");
1360        trace::instant!(c"alarms", c"hrtimer:started", trace::Scope::Process);
1361        let response = start_and_wait_fut.await;
1362        trace::instant!(c"alarms", c"hrtimer:response", trace::Scope::Process);
1363        match response {
1364            Err(e) => {
1365                trace::instant!(c"alarms", c"hrtimer:response:fidl_error", trace::Scope::Process);
1366                debug!("hrtimer_task: hrtimer FIDL error: {:?}", e);
1367                command_send
1368                    .start_send(Cmd::AlarmFidlError { expired_deadline: now, error: e })
1369                    .unwrap();
1370                // Known limitation: no wake lease is available in this error path,
1370                // so the system may suspend before recovery.
1371            }
1372            Ok(Err(e)) => {
1373                let driver_error_str = format!("{:?}", e);
1374                trace::instant!(c"alarms", c"hrtimer:response:driver_error", trace::Scope::Process, "error" => &driver_error_str[..]);
1375                debug!("schedule_hrtimer: hrtimer driver error: {:?}", e);
1376                command_send
1377                    .start_send(Cmd::AlarmDriverError { expired_deadline: now, error: e })
1378                    .unwrap();
1379                // Known limitation: no wake lease is available in this error path,
1379                // so the system may suspend before recovery.
1380            }
1381            Ok(Ok(keep_alive)) => {
1382                trace::instant!(c"alarms", c"hrtimer:response:alarm", trace::Scope::Process);
1383                debug!("hrtimer: got alarm response: {:?}", keep_alive);
1384                // May trigger sooner than the deadline.
1385                command_send
1386                    .start_send(Cmd::Alarm { expired_deadline: deadline, keep_alive })
1387                    .unwrap();
1388            }
1389        }
1390        debug!("hrtimer_task: exiting task.");
1391        trace::instant!(c"alarms", c"hrtimer:task_exit", trace::Scope::Process);
1392    });
1393    debug!("schedule_hrtimer: waiting for event to be signaled");
1394
1395    // We must wait here to ensure that the wake alarm has been scheduled.
1396    wait_signaled(&hrtimer_scheduled).await;
1397    debug!("schedule_hrtimer: hrtimer wake alarm has been scheduled.");
1398    TimerState { task: hrtimer_task, deadline }
1399}
1400
1401/// Notify all `timers` that `reference_instant` has been reached.
1402///
1403/// The notified `timers` are removed from the list of timers to notify.
1404///
1405/// Args:
1406/// - `timers`: the collection of currently available timers.
1407/// - `lease_prototype`: an EventPair used as a wake lease.
1408/// - `reference_instant`: the time instant used as a reference for alarm notification.
1409///   All timers with deadlines at or before this instant are notified.
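///
/// # Example
///
/// A minimal sketch of how the wake timer loop calls this function; `timers`
/// and the `peer` lease prototype are assumed to already exist in the caller.
///
/// ```ignore
/// let now = fasync::BootInstant::now();
/// let expired = notify_all(&mut timers, &peer, now).expect("notification succeeds");
/// debug!("expired alarms: {}", expired);
/// ```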
1410fn notify_all(
1411    timers: &mut Timers,
1412    lease_prototype: &zx::EventPair,
1413    reference_instant: fasync::BootInstant,
1414) -> Result<usize> {
1415    trace::duration!(c"alarms", c"notify_all");
1416    let now = fasync::BootInstant::now();
1417    let mut expired = 0;
1418    while let Some(mut timer_node) = timers.maybe_expire_earliest(reference_instant) {
1419        expired += 1;
1420        // How much later than requested the notification happened (the slack
1420        // computed below is negative if we are late).
1421        let deadline = *timer_node.get_deadline();
1422        let alarm_id = timer_node.get_alarm_id().to_string();
1423        let cid = timer_node.get_cid().clone();
1424        let slack: zx::BootDuration = deadline - now;
1425        debug!(
1426            concat!(
1427                "wake_alarm_loop: ALARM alarm_id: \"{}\"\n\tdeadline: {},\n\tcid: {:?},\n\t",
1428                "reference_instant: {},\n\tnow: {},\n\tslack: {}",
1429            ),
1430            alarm_id,
1431            format_timer(deadline.into()),
1432            cid,
1433            format_timer(reference_instant.into()),
1434            format_timer(now.into()),
1435            format_duration(slack),
1436        );
1437        let lease = clone_handle(lease_prototype);
1438        trace::instant!(c"alarms", c"notify", trace::Scope::Process, "alarm_id" => &alarm_id[..], "cid" => cid);
1439        let _ = timer_node
1440            .take_responder()
1441            .map(|r| r.send(Ok(lease)))
1442            .map_or_else(|| Ok(()), |res| res)
1443            .map_err(|e| error!("could not signal responder: {:?}", e));
1444        trace::instant!(c"alarms", c"notified", trace::Scope::Process);
1445    }
1446    trace::instant!(c"alarms", c"notify", trace::Scope::Process, "expired_count" => expired);
1447    debug!("notify_all: expired count: {}", expired);
1448    Ok(expired)
1449    // A new timer is not scheduled yet here.
1450}
1451
1452/// The hrtimer driver service directory.  hrtimer driver APIs appear as randomly
1453/// named files in this directory. They are expected to come and go.
1454const HRTIMER_DIRECTORY: &str = "/dev/class/hrtimer";
1455
1456/// Connects to the high resolution timer device driver.
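///
/// # Example
///
/// A minimal sketch; production code will want real error handling instead of
/// `expect`.
///
/// ```ignore
/// let hrtimer = connect_to_hrtimer_async().expect("hrtimer device should be available");
/// // `hrtimer` is a ffhh::DeviceProxy that can be handed to the alarm machinery.
/// ```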
1457pub fn connect_to_hrtimer_async() -> Result<ffhh::DeviceProxy> {
1458    debug!("connect_to_hrtimer_async: trying directory: {}", HRTIMER_DIRECTORY);
1459    let mut dir = std::fs::read_dir(HRTIMER_DIRECTORY)
1460        .map_err(|e| anyhow!("Failed to open hrtimer directory: {e}"))?;
1461    let entry = dir
1462        .next()
1463        .ok_or_else(|| anyhow!("No entry in the hrtimer directory"))?
1464        .map_err(|e| anyhow!("Failed to find hrtimer device: {e}"))?;
1465    let path = entry
1466        .path()
1467        .into_os_string()
1468        .into_string()
1469        .map_err(|e| anyhow!("Failed to parse the device entry path: {e:?}"))?;
1470
1471    let (hrtimer, server_end) = fidl::endpoints::create_proxy::<ffhh::DeviceMarker>();
1472    fdio::service_connect(&path, server_end.into_channel())
1473        .map_err(|e| anyhow!("Failed to open hrtimer device: {e}"))?;
1474
1475    Ok(hrtimer)
1476}
1477
1478#[cfg(test)]
1479mod tests {
1480    use super::*;
1481    use diagnostics_assertions::{assert_data_tree, AnyProperty};
1482    use futures::{select, Future};
1483    use std::task::Poll;
1484    use test_case::test_case;
1485    use test_util::{assert_gt, assert_lt};
1486
1487    // A test fixture function that sets up the fake wake alarms machinery.
1488    //
1489    // The user supplies a factory function with the async code to run.
1490    //
1491    // Args:
1492    //  - `run_for_duration`: the amount of fake time that the test should run for.
1493    //  - `test_fn_factory`: a plain function that takes a WakeProxy and an Inspector,
1494    //    and returns the async closure that the test should run.
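    //
    // Example (illustrative sketch, mirroring the tests below):
    //
    //   run_in_fake_time_and_test_context(
    //       zx::MonotonicDuration::from_nanos(110),
    //       |wake_proxy, _inspector| async move {
    //           // ... exercise `wake_proxy` here ...
    //       },
    //   );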
1495    fn run_in_fake_time_and_test_context<F, U, T>(
1496        run_for_duration: zx::MonotonicDuration,
1497        test_fn_factory: F,
1498    ) where
1499        F: FnOnce(fta::WakeProxy, finspect::Inspector) -> U, // F returns an async closure.
1500        U: Future<Output = T> + 'static, // the async closure may return an arbitrary type T.
1501        T: 'static,
1502    {
1503        let mut exec = fasync::TestExecutor::new_with_fake_time(); // We will be running this test case in fake time.
1504        exec.set_fake_time(fasync::MonotonicInstant::from_nanos(0));
1505        let (mut fake_commands_in, fake_commands_out) = mpsc::channel::<FakeCmd>(0);
1506        let (hrtimer_proxy, hrtimer_task) = fake_hrtimer_connection(fake_commands_out);
1507        let inspector = finspect::component::inspector();
1508        let alarms = Rc::new(Loop::new(hrtimer_proxy, inspector.root().create_child("test")));
1509
1510        let (_handle, peer) = zx::EventPair::create();
1511
1512        let done_set_properties = zx::Event::create();
1513        let begin_test = clone_handle(&done_set_properties);
1514        let begin_serve = clone_handle(&done_set_properties);
1515
1516        let mut fake_commands_in_clone = fake_commands_in.clone();
1517        let config_task = async move {
1518            fake_commands_in
1519                .start_send(FakeCmd::SetProperties {
1520                    resolutions: vec![zx::Duration::from_nanos(43)],
1521                    max_ticks: 100,
1522                    keep_alive: peer,
1523                    done: clone_handle(&done_set_properties),
1524                })
1525                .unwrap();
1526        };
1527
1528        let (wake_proxy, wake_stream) =
1529            fidl::endpoints::create_proxy_and_stream::<fta::WakeMarker>();
1530
1531        let serving_task = async move {
1532            fasync::OnSignals::new(begin_serve, zx::Signals::EVENT_SIGNALED).await.unwrap();
1533            serve(alarms, wake_stream).await;
1534        };
1535
1536        let seq_fn_fut = test_fn_factory(wake_proxy, inspector.clone());
1537
1538        let test_task = async move {
1539            // Wait until configuration has completed.
1540            fasync::OnSignals::new(begin_test, zx::Signals::EVENT_SIGNALED).await.unwrap();
1541
1542            let result = seq_fn_fut.await;
1543
1544            // Request orderly shutdown.
1545            fake_commands_in_clone.start_send(FakeCmd::Exit).unwrap();
1546            result
1547        };
1548
1549        let mut main_fut = fasync::Task::local(async {
1550            let _r = futures::join!(hrtimer_task, config_task, serving_task, test_task);
1551        });
1552        run_in_fake_time(&mut exec, &mut main_fut, run_for_duration);
1553    }
1554
1555    // A loop that moves fake time forward in small increments, waking timers along the way.
1556    //
1557    // In almost all tests, we set up the environment for the test to run in, under a
1558    // test executor running in fake time. We then submit the resulting future
1559    // to this function for execution.
1560    //
1561    // This has been taken from //src/ui/lib/input_pipeline/src/autorepeater.rs
1562    // with some adaptation.
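    //
    // Illustrative sketch, using the same shapes as the fixture above:
    //
    //   let mut exec = fasync::TestExecutor::new_with_fake_time();
    //   let mut main_fut = fasync::Task::local(async { /* test body */ });
    //   run_in_fake_time(&mut exec, &mut main_fut, zx::MonotonicDuration::from_millis(1));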
1563    fn run_in_fake_time<F>(
1564        executor: &mut fasync::TestExecutor,
1565        main_fut: &mut F,
1566        total_duration: zx::MonotonicDuration,
1567    ) where
1568        F: Future<Output = ()> + Unpin,
1569    {
1570        const INCREMENT: zx::MonotonicDuration = zx::MonotonicDuration::from_nanos(13);
1571        let mut current = zx::MonotonicDuration::ZERO;
1572        let mut poll_status = Poll::Pending;
1573
1574        // We run until either the future completes or the timeout is reached,
1575        // whichever comes first.
1576        // Running the future after it returns Poll::Ready is not allowed, so
1577        // we must exit the loop then.
1578        while current < (total_duration + INCREMENT) && poll_status == Poll::Pending {
1579            let next = executor.now() + INCREMENT;
1580            executor.set_fake_time(next);
1581            executor.wake_expired_timers();
1582            poll_status = executor.run_until_stalled(main_fut);
1583            current = current + INCREMENT;
1584        }
1585        let now = executor.now();
1586        assert_eq!(
1587            poll_status,
1588            Poll::Ready(()),
1589            "the main future did not complete at {}, perhaps increase total_duration?",
1590            format_timer(now.into())
1591        );
1592    }
1593
1594    // Human readable duration formatting is useful.
1595    #[test_case(0, "0ns (0)" ; "zero")]
1596    #[test_case(1000, "1μs (1000)" ; "1us positive")]
1597    #[test_case(-1000, "-1μs (-1000)"; "1us negative")]
1598    #[test_case(YEAR_IN_NANOS, "1year(s) (31536000000000000)"; "A year")]
1599    #[test_case(YEAR_IN_NANOS + 8 * DAY_IN_NANOS + 1,
1600        "1year(s)_1week(s)_1day(s)_1ns (32227200000000001)" ; "A weird duration")]
1601    #[test_case(2 * HOUR_IN_NANOS + 8 * MIN_IN_NANOS + 32 * SEC_IN_NANOS + 1,
1602        "2h_8min_32s_1ns (7712000000001)" ; "A reasonable long duration")]
1603    fn test_format_common(value: i64, repr: &str) {
1604        assert_eq!(format_common(value), repr.to_string());
1605    }
1606
1607    #[test_case(
1608        TimerDuration::new(zx::BootDuration::from_nanos(1), 1),
1609        TimerDuration::new(zx::BootDuration::from_nanos(1), 1)
1610    )]
1611    #[test_case(
1612        TimerDuration::new(zx::BootDuration::from_nanos(1), 10),
1613        TimerDuration::new(zx::BootDuration::from_nanos(10), 1)
1614    )]
1615    #[test_case(
1616        TimerDuration::new(zx::BootDuration::from_nanos(10), 1),
1617        TimerDuration::new(zx::BootDuration::from_nanos(1), 10)
1618    )]
1619    #[test_case(
1620        TimerDuration::new(zx::BootDuration::from_micros(1), 1),
1621        TimerDuration::new(zx::BootDuration::from_nanos(1), 1000)
1622    )]
1623    fn test_slack_eq(one: TimerDuration, other: TimerDuration) {
1624        assert_eq!(one, other);
1625    }
1626
1627    #[test_case(
1628        TimerDuration::new(zx::BootDuration::from_nanos(1), 1),
1629        TimerDuration::new(zx::BootDuration::from_nanos(1), 2)
1630    )]
1631    #[test_case(
1632        TimerDuration::new(zx::BootDuration::from_nanos(1), 1),
1633        TimerDuration::new(zx::BootDuration::from_nanos(10), 1)
1634    )]
1635    fn test_slack_lt(one: TimerDuration, other: TimerDuration) {
1636        assert_lt!(one, other);
1637    }
1638
1639    #[test_case(
1640        TimerDuration::new(zx::BootDuration::from_nanos(1), 2),
1641        TimerDuration::new(zx::BootDuration::from_nanos(1), 1)
1642    )]
1643    #[test_case(
1644        TimerDuration::new(zx::BootDuration::from_nanos(10), 1),
1645        TimerDuration::new(zx::BootDuration::from_nanos(1), 1)
1646    )]
1647    fn test_slack_gt(one: TimerDuration, other: TimerDuration) {
1648        assert_gt!(one, other);
1649    }
1650
1651    #[test_case(
1652        vec![zx::BootDuration::from_nanos(1)],
1653        100,
1654        zx::BootDuration::from_nanos(50),
1655        TimerDuration::new(zx::BootDuration::from_nanos(1), 50) ; "Exact at 50x1ns"
1656    )]
1657    #[test_case(
1658        vec![zx::BootDuration::from_nanos(2)],
1659        100,
1660        zx::BootDuration::from_nanos(50),
1661        TimerDuration::new(zx::BootDuration::from_nanos(2), 25) ; "Exact at 25x2ns"
1662    )]
1663    #[test_case(
1664        vec![zx::BootDuration::from_nanos(3)],
1665        100,
1666        zx::BootDuration::from_nanos(50),
1667        // The closest duration is 51ns.
1668        TimerDuration::new(zx::BootDuration::from_nanos(3), 17) ; "Inexact at 51ns"
1669    )]
1670    #[test_case(
1671        vec![
1672            zx::BootDuration::from_nanos(3),
1673            zx::BootDuration::from_nanos(4)
1674        ],
1675        100,
1676        zx::BootDuration::from_nanos(50),
1677        TimerDuration::new(zx::BootDuration::from_nanos(3), 17) ; "3ns is a better resolution"
1678    )]
1679    #[test_case(
1680        vec![
1681            zx::BootDuration::from_nanos(1000),
1682        ],
1683        100,
1684        zx::BootDuration::from_nanos(50),
1685        TimerDuration::new(zx::BootDuration::from_nanos(1000), 1) ;
1686        "950ns negative slack is the best we can do"
1687    )]
1688    #[test_case(
1689        vec![
1690            zx::BootDuration::from_nanos(1),
1691        ],
1692        10,
1693        zx::BootDuration::from_nanos(50),
1694        TimerDuration::new(zx::BootDuration::from_nanos(1), 10) ;
1695        "10ns positive slack is the best we can do"
1696    )]
1697    #[test_case(
1698        vec![
1699            zx::BootDuration::from_millis(1),
1700            zx::BootDuration::from_micros(100),
1701            zx::BootDuration::from_micros(10),
1702            zx::BootDuration::from_micros(1),
1703        ],
1704        20,  // Make only one of the resolutions above match.
1705        zx::BootDuration::from_micros(150),
1706        TimerDuration::new(zx::BootDuration::from_micros(10), 15) ;
1707        "Realistic case with resolutions from driver, should be 15us"
1708    )]
1709    #[test_case(
1710        vec![
1711            zx::BootDuration::from_millis(1),
1712            zx::BootDuration::from_micros(100),
1713            zx::BootDuration::from_micros(10),
1714            zx::BootDuration::from_micros(1),
1715        ],
1716        2000,  // Make only one of the resolutions above match.
1717        zx::BootDuration::from_micros(6000),
1718        TimerDuration::new(zx::BootDuration::from_millis(1), 6) ;
1719        "Coarser exact unit wins"
1720    )]
1721    fn test_pick_setting(
1722        resolutions: Vec<zx::BootDuration>,
1723        max_ticks: u64,
1724        duration: zx::BootDuration,
1725        expected: TimerDuration,
1726    ) {
1727        let config = TimerConfig::new_from_data(MAIN_TIMER_ID as u64, &resolutions[..], max_ticks);
1728        let actual = config.pick_setting(duration);
1729
1730        // assert_eq!() is not enough here: TimerDuration equality only compares the
1731        // total duration, while we also require that the same resolution is used in both.
1732        assert_slack_eq(expected, actual);
1733    }
1734
1735    // TimerDuration assertion with human-friendly output in case of an error.
1736    fn assert_slack_eq(expected: TimerDuration, actual: TimerDuration) {
1737        let slack = expected.duration() - actual.duration();
1738        assert_eq!(
1739            actual.resolution(),
1740            expected.resolution(),
1741            "\n\texpected: {} ({})\n\tactual  : {} ({})\n\tslack: expected-actual={}",
1742            expected,
1743            format_duration(expected.duration()),
1744            actual,
1745            format_duration(actual.duration()),
1746            format_duration(slack)
1747        );
1748        assert_eq!(
1749            actual.ticks(),
1750            expected.ticks(),
1751            "\n\texpected: {} ({})\n\tactual  : {} ({})\n\tslack: expected-actual={}",
1752            expected,
1753            format_duration(expected.duration()),
1754            actual,
1755            format_duration(actual.duration()),
1756            format_duration(slack)
1757        );
1758    }
1759
1760    #[derive(Debug)]
1761    enum FakeCmd {
1762        SetProperties {
1763            resolutions: Vec<zx::BootDuration>,
1764            max_ticks: i64,
1765            keep_alive: zx::EventPair,
1766            done: zx::Event,
1767        },
1768        Exit,
1769    }
1770
1771    use std::cell::RefCell;
1772    use std::rc::Rc;
1773
1774    // A fake that emulates some aspects of the hrtimer driver.
1775    //
1776    // Specifically, it can be configured with different resolutions, and it
1777    // panics if any of the start methods is called twice in succession without
1778    // canceling the timer in between.
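    //
    // Example (illustrative sketch of how the fixture above wires the fake in):
    //
    //   let (mut cmd_snd, cmd_rcv) = mpsc::channel::<FakeCmd>(0);
    //   let (hrtimer_proxy, hrtimer_task) = fake_hrtimer_connection(cmd_rcv);
    //   // `hrtimer_proxy` stands in for a real ffhh::DeviceProxy; `hrtimer_task`
    //   // must be polled (e.g. via futures::join!) for the fake to make progress.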
1779    fn fake_hrtimer_connection(
1780        rcv: mpsc::Receiver<FakeCmd>,
1781    ) -> (ffhh::DeviceProxy, fasync::Task<()>) {
1782        debug!("fake_hrtimer_connection: entry.");
1783        let (hrtimer, mut stream) =
1784            fidl::endpoints::create_proxy_and_stream::<ffhh::DeviceMarker>();
1785        let task = fasync::Task::local(async move {
1786            let mut rcv = rcv.fuse();
1787            let timer_properties = Rc::new(RefCell::new(None));
1788            let wake_lease = Rc::new(RefCell::new(None));
1789
1790            // Set to true while the hardware timer is supposed to be running.
1791            // The hardware timer may not be reprogrammed without canceling it first;
1792            // this makes the tests fail the same way that production would.
1793            let timer_running = Rc::new(RefCell::new(false));
1794
1795            loop {
1796                let timer_properties = timer_properties.clone();
1797                let wake_lease = wake_lease.clone();
1798                select! {
1799                    cmd = rcv.next() => {
1800                        debug!("fake_hrtimer_connection: cmd: {:?}", cmd);
1801                        match cmd {
1802                            Some(FakeCmd::Exit) => { break; }
1803                            Some(FakeCmd::SetProperties{ resolutions, max_ticks, keep_alive, done}) => {
1804                                let mut timer_props = vec![];
1805                                for v in 0..10 {
1806                                    timer_props.push(ffhh::TimerProperties {
1807                                        supported_resolutions: Some(
1808                                            resolutions.iter()
1809                                                .map(|d| ffhh::Resolution::Duration(d.into_nanos())).collect()),
1810                                        max_ticks: Some(max_ticks.try_into().unwrap()),
1811                                        // The start_and_wait method is supported.
1812                                        supports_wait: Some(true),
1813                                        id: Some(v),
1814                                        ..Default::default()
1815                                        },
1816                                    );
1817                                }
1818                                *timer_properties.borrow_mut() = Some(timer_props);
1819                                *wake_lease.borrow_mut() = Some(keep_alive);
1820                                debug!("set timer properties to: {:?}", timer_properties);
1821                                signal(&done);
1822                            }
1823                            e => {
1824                                panic!("unrecognized command: {:?}", e);
1825                            }
1826                        }
1827                        // Set some responses if we have them.
1828                    },
1829                    event = stream.next() => {
1830                        debug!("fake_hrtimer_connection: event: {:?}", event);
1831                        if let Some(Ok(event)) = event {
1832                            match event {
1833                                ffhh::DeviceRequest::Start { responder, .. } => {
1834                                    assert!(!*timer_running.borrow(), "invariant broken: timer may not be running here");
1835                                    *timer_running.borrow_mut() = true;
1836                                    responder.send(Ok(())).expect("");
1837                                }
1838                                ffhh::DeviceRequest::Stop { responder, .. } => {
1839                                    *timer_running.borrow_mut() = false;
1840                                    responder.send(Ok(())).expect("");
1841                                }
1842                                ffhh::DeviceRequest::GetTicksLeft { responder, .. } => {
1843                                    responder.send(Ok(1)).expect("");
1844                                }
1845                                ffhh::DeviceRequest::SetEvent { responder, .. } => {
1846                                    responder.send(Ok(())).expect("");
1847                                }
1848                                ffhh::DeviceRequest::StartAndWait { id, resolution, ticks, setup_event, responder, .. } => {
1849                                    assert!(!*timer_running.borrow(), "invariant broken: timer may not be running here");
1850                                    *timer_running.borrow_mut() = true;
1851                                    debug!("fake_hrtimer_connection: starting timer: \"{}\", resolution: {:?}, ticks: {}", id, resolution, ticks);
1852                                    let ticks: i64 = ticks.try_into().unwrap();
1853                                    let sleep_duration  = zx::BootDuration::from_nanos(ticks * match resolution {
1854                                        ffhh::Resolution::Duration(e) => e,
1855                                        _ => {
1856                                            error!("resolution has an unexpected value");
1857                                            1
1858                                        }
1859                                    });
1860                                    let timer_running_clone = timer_running.clone();
1861                                    fasync::Task::local(async move {
1862                                        // Respond after the requested sleep time. In tests this will
1863                                        // be sleeping in fake time.
1864                                        fasync::Timer::new(sleep_duration).await;
1865                                        *timer_running_clone.borrow_mut() = false;
1866                                        responder.send(Ok(clone_handle(wake_lease.borrow().as_ref().unwrap()))).unwrap();
1867
1868                                        // Signaling the setup event allows the client to proceed
1869                                        // with post-scheduling work.
1870                                        signal(&setup_event);
1871
1872                                    }).detach();
1873                                }
1874                                ffhh::DeviceRequest::StartAndWait2 { responder, .. } => {
1875                                    assert!(!*timer_running.borrow(), "invariant broken: timer may not be running here");
1876                                    *timer_running.borrow_mut() = true;
1877                                    responder.send(Err(ffhh::DriverError::InternalError)).expect("");
1878                                }
1879                                ffhh::DeviceRequest::GetProperties { responder, .. } => {
1880                                    if (*timer_properties).borrow().is_none() {
1881                                        error!("timer_properties is empty, this is not what you want!");
1882                                    }
1883                                    responder
1884                                        .send(ffhh::Properties {
1885                                            timers_properties: (*timer_properties).borrow().clone(),
1886                                            ..Default::default()
1887                                        })
1888                                        .expect("");
1889                                }
1890                                ffhh::DeviceRequest::_UnknownMethod { .. } => todo!(),
1891                            }
1892                        }
1893                    },
1894                }
1895            }
1896            debug!("fake_hrtimer_connection: exiting");
1897        });
1898        (hrtimer, task)
1899    }
1900
1901    #[fuchsia::test]
1902    fn test_basic_timed_wait() {
1903        let deadline = zx::BootInstant::from_nanos(100);
1904        let test_duration = zx::MonotonicDuration::from_nanos(110);
1905        run_in_fake_time_and_test_context(test_duration, |wake_proxy, _| async move {
1906            let keep_alive = zx::Event::create();
1907
1908            wake_proxy
1909                .set_and_wait(deadline.into(), keep_alive, "Hello".into())
1910                .await
1911                .unwrap()
1912                .unwrap();
1913
1914            assert_gt!(fasync::BootInstant::now().into_nanos(), deadline.into_nanos());
1915        });
1916    }
1917
1918    #[test_case(
1919        zx::BootInstant::from_nanos(100),
1920        zx::BootInstant::from_nanos(200),
1921        zx::MonotonicDuration::from_nanos(250) ;
1922        "Two timers: one at 100 and another at 200 ns"
1923    )]
1924    #[test_case(
1925        zx::BootInstant::from_nanos(100),
1926        zx::BootInstant::from_nanos(100),
1927        // A tight end-of-test will detect a stuck timer.
1928        zx::MonotonicDuration::from_nanos(104) ;
1929        "Two timers at the same deadline."
1930    )]
1931    #[test_case(
1932        zx::BootInstant::from_nanos(-1),
1933        zx::BootInstant::from_nanos(-1),
1934        zx::MonotonicDuration::from_nanos(30) ;
1935        "Two timers expire immediately."
1936    )]
1937    #[fuchsia::test]
1938    fn test_timed_wait_two_timers_params(
1939        // One timer scheduled at this instant (fake time starts from zero).
1940        first_deadline: zx::BootInstant,
1941        // Another timer scheduled at this instant.
1942        second_deadline: zx::BootInstant,
1943        // Run the fake time for this long.
1944        duration: zx::MonotonicDuration,
1945    ) {
1946        run_in_fake_time_and_test_context(duration, |wake_proxy, _| async move {
1947            let lease1 = zx::Event::create();
1948            let fut1 = wake_proxy.set_and_wait(first_deadline.into(), lease1, "Hello1".into());
1949
1950            let lease2 = zx::Event::create();
1951            let fut2 = wake_proxy.set_and_wait(second_deadline.into(), lease2, "Hello2".into());
1952
1953            let (result1, result2) = futures::join!(fut1, fut2);
1954
1955            result1.unwrap().unwrap();
1956            result2.unwrap().unwrap();
1957
1958            assert_gt!(fasync::BootInstant::now().into_nanos(), first_deadline.into_nanos());
1959            assert_gt!(fasync::BootInstant::now().into_nanos(), second_deadline.into_nanos());
1960        });
1961    }
1962
1963    #[test_case(
1964        zx::BootInstant::from_nanos(100),
1965        zx::BootInstant::from_nanos(200),
1966        zx::MonotonicDuration::from_nanos(250) ;
1967        "Reschedule with push-out"
1968    )]
1969    #[test_case(
1970        zx::BootInstant::from_nanos(100),
1971        zx::BootInstant::from_nanos(100),
1972        // A tight end-of-test will detect a stuck timer.
1973        zx::MonotonicDuration::from_nanos(104) ;
1974        "Reschedule with same deadline"
1975    )]
1976    #[test_case(
1977        zx::BootInstant::from_nanos(200),
1978        zx::BootInstant::from_nanos(100),
1979        // A tight end-of-test will detect a stuck timer.
1980        zx::MonotonicDuration::from_nanos(240) ;
1981        "Pull in"
1982    )]
1983    #[fuchsia::test]
1984    fn test_timed_wait_same_timer(
1985        // One timer scheduled at this instant (fake time starts from zero).
1986        first_deadline: zx::BootInstant,
1987        // Another timer scheduled at this instant.
1988        second_deadline: zx::BootInstant,
1989        // Run the fake time for this long.
1990        duration: zx::MonotonicDuration,
1991    ) {
1992        run_in_fake_time_and_test_context(duration, |wake_proxy, _| async move {
1993            let lease1 = zx::Event::create();
1994
1995            wake_proxy
1996                .set_and_wait(first_deadline.into(), lease1, "Hello".into())
1997                .await
1998                .unwrap()
1999                .unwrap();
2000            let lease2 = zx::Event::create();
2001            wake_proxy
2002                .set_and_wait(second_deadline.into(), lease2, "Hello2".into())
2003                .await
2004                .unwrap()
2005                .unwrap();
2006        });
2007    }
2008
2009    // Test what happens when we schedule a timer, then change our mind and
2010    // reschedule the same timer, but with a sooner deadline.
2011    #[fuchsia::test]
2012    fn test_reschedule_pull_in() {
2013        const LONG_DEADLINE_NANOS: i64 = 200;
2014        const SHORT_DEADLINE_NANOS: i64 = 100;
2015        const ALARM_ID: &str = "Hello";
2016        run_in_fake_time_and_test_context(
2017            zx::MonotonicDuration::from_nanos(LONG_DEADLINE_NANOS + 50),
2018            |wake_proxy, _| async move {
2019                let wake_proxy = Rc::new(RefCell::new(wake_proxy));
2020
2021                let keep_alive = zx::Event::create();
2022
2023                let (mut sync_send, mut sync_recv) = mpsc::channel(1);
2024
2025                // Schedule a timer with the long deadline first. Let it wait, then
2026                // try to reschedule the same timer with a sooner deadline.
2027                let wake_proxy_clone = wake_proxy.clone();
2028                let long_deadline_fut = async move {
2029                    let wake_fut = wake_proxy_clone.borrow().set_and_wait(
2030                        zx::BootInstant::from_nanos(LONG_DEADLINE_NANOS).into(),
2031                        keep_alive,
2032                        ALARM_ID.into(),
2033                    );
2034                    // Allow the rest of the test to proceed from here.
2035                    sync_send.send(()).await.unwrap();
2036
2037                    // Yield-wait for the first scheduled timer.
2038                    wake_fut.await.unwrap().unwrap();
2039                };
2040
2041                // Schedule the same timer as above, but with a shorter deadline.
2042                // The result should be that the alarm fires at the short deadline,
2043                // before the long deadline would have expired.
2044                let short_deadline_fut = async move {
2045                    // Wait until we know that the long deadline timer has been scheduled.
2046                    let _ = sync_recv.next().await;
2047
2048                    let keep_alive2 = zx::Event::create();
2049                    let _ = wake_proxy
2050                        .borrow()
2051                        .set_and_wait(
2052                            zx::BootInstant::from_nanos(SHORT_DEADLINE_NANOS).into(),
2053                            keep_alive2,
2054                            ALARM_ID.into(),
2055                        )
2056                        .await
2057                        .unwrap()
2058                        .unwrap();
2059
2060                    // We get here only after the "short" deadline expires; verify that:
2061                    assert_gt!(fasync::BootInstant::now().into_nanos(), SHORT_DEADLINE_NANOS);
2062                    assert_lt!(fasync::BootInstant::now().into_nanos(), LONG_DEADLINE_NANOS);
2063                };
2064                futures::join!(short_deadline_fut, long_deadline_fut);
2065            },
2066        );
2067    }
2068
2069    // Test what happens when we schedule a timer, then change our mind and
2070    // reschedule the same timer, but with a later deadline.
2071    #[fuchsia::test]
2072    fn test_reschedule_push_out() {
2073        const LONG_DEADLINE_NANOS: i64 = 200;
2074        const SHORT_DEADLINE_NANOS: i64 = 100;
2075        const ALARM_ID: &str = "Hello";
2076        run_in_fake_time_and_test_context(
2077            zx::MonotonicDuration::from_nanos(LONG_DEADLINE_NANOS + 50),
2078            |wake_proxy, inspector| async move {
2079                let wake_proxy = Rc::new(RefCell::new(wake_proxy));
2080
2081                let keep_alive = zx::Event::create();
2082
2083                let (mut sync_send, mut sync_recv) = mpsc::channel(1);
2084
2085                // Schedule a timer with the short deadline first. Let it wait, then
2086                // try to reschedule the same timer with a later deadline.
2087                let wake_proxy_clone = wake_proxy.clone();
2088                let short_deadline_fut = async move {
2089                    let wake_fut = wake_proxy_clone.borrow().set_and_wait(
2090                        zx::BootInstant::from_nanos(SHORT_DEADLINE_NANOS).into(),
2091                        keep_alive,
2092                        ALARM_ID.into(),
2093                    );
2094                    // Allow the rest of the test to proceed from here.
2095                    sync_send.send(()).await.unwrap();
2096
2097                    // Yield-wait for the first scheduled timer.
2098                    let result = wake_fut.await.unwrap();
2099                    assert_eq!(
2100                        result,
2101                        Err(fta::WakeError::Dropped),
2102                        "expected wake alarm to be dropped"
2103                    );
2104                    assert_gt!(fasync::BootInstant::now().into_nanos(), SHORT_DEADLINE_NANOS);
2105                };
2106
2107                // Schedule the same timer as above, but with a longer deadline.
2108                // The result should be that the earlier alarm is dropped (pushed out)
2109                // and the rescheduled alarm fires at the long deadline.
2110                let long_deadline_fut = async move {
2111                    // Wait until we know that the other deadline timer has been scheduled.
2112                    let _ = sync_recv.next().await;
2113
2114                    let keep_alive2 = zx::Event::create();
2115                    let _ = wake_proxy
2116                        .borrow()
2117                        .set_and_wait(
2118                            zx::BootInstant::from_nanos(LONG_DEADLINE_NANOS).into(),
2119                            keep_alive2,
2120                            ALARM_ID.into(),
2121                        )
2122                        .await
2123                        .unwrap()
2124                        .unwrap();
2125
2126                    // Both the short and the long deadline expire.
2127                    assert_gt!(fasync::BootInstant::now().into_nanos(), LONG_DEADLINE_NANOS);
2128                };
2129                futures::join!(long_deadline_fut, short_deadline_fut);
2130
2131                // The values in the inspector tree are fixed because the test
2132                // runs fully deterministically in fake time.
2133                assert_data_tree!(inspector, root: {
2134                    test: {
2135                        hardware: {
2136                            // All alarms fired, so this should be "none".
2137                            current_deadline: "(none)",
2138                            remaining_until_alarm: "(none)",
2139                        },
2140                        now_formatted: "247ns (247)",
2141                        now_ns: 247i64,
2142                        pending_timers: "\n\t",
2143                        pending_timers_count: 0u64,
2144                        requested_deadlines_ns: AnyProperty,
2145                    },
2146                });
2147            },
2148        );
2149    }
2150}