1mod emu;
29mod timers;
30
31use crate::emu::EmulationTimerOps;
32use anyhow::{Context, Result, anyhow};
33use async_trait::async_trait;
34use fidl::HandleBased;
35use fidl::encoding::ProxyChannelBox;
36use fidl::endpoints::RequestStream;
37use fuchsia_component::client::connect_to_named_protocol_at_dir_root;
38use fuchsia_inspect::{IntProperty, NumericProperty, Property};
39use futures::channel::mpsc;
40use futures::sink::SinkExt;
41use futures::{StreamExt, TryStreamExt};
42use log::{debug, error, warn};
43use scopeguard::defer;
44use std::cell::RefCell;
45use std::rc::Rc;
46use std::sync::LazyLock;
47use time_pretty::{MSEC_IN_NANOS, format_duration, format_timer};
48use zx::AsHandleRef;
49use {
50 fidl_fuchsia_driver_token as fdt, fidl_fuchsia_hardware_hrtimer as ffhh,
51 fidl_fuchsia_time_alarms as fta, fuchsia_async as fasync, fuchsia_inspect as finspect,
52 fuchsia_runtime as fxr, fuchsia_trace as trace,
53};
54
// Token used to authenticate stack-trace requests to the hrtimer driver.
// Populated from `driver_node_token` in `get_timer_properties`; empty until then.
static DEBUG_STACK_TRACE_TOKEN: std::sync::OnceLock<zx::Event> = std::sync::OnceLock::new();
// i64::MAX widened to u64; upper bound for tick counts that still fit a signed duration.
static I64_MAX_AS_U64: LazyLock<u64> = LazyLock::new(|| i64::MAX.try_into().expect("infallible"));
// i32::MAX widened to u64; used to clamp values to a 32-bit-safe range.
static I32_MAX_AS_U64: LazyLock<u64> = LazyLock::new(|| i32::MAX.try_into().expect("infallible"));

// Largest tick count ever programmed into the hardware timer (see `TimerConfig`).
static MAX_USEFUL_TICKS: LazyLock<u64> = LazyLock::new(|| *I32_MAX_AS_U64);

// Smallest tick count that still produces a nonzero delay.
static MIN_USEFUL_TICKS: u64 = 1;

// Index into the driver-reported `timers_properties` of the timer used for wake alarms.
// NOTE(review): assumed to match this platform's hrtimer layout — confirm.
const MAIN_TIMER_ID: usize = 6;

// Delay above which an operation is considered suspiciously long.
const LONG_DELAY_NANOS: i64 = 2000 * MSEC_IN_NANOS;

// How long `log_long_op!` waits before reporting a blocked async operation.
const TIMEOUT_SECONDS: i64 = 40;
75
76async fn request_stack_trace() {
77 if let Some(ev) = DEBUG_STACK_TRACE_TOKEN.get() {
78 log::warn!("*** DRIVER STACK TRACE REQUESTED: expect a driver stack trace below.");
79 let ev_dup = clone_handle(ev);
80 let debug_proxy = fuchsia_component::client::connect_to_protocol::<fdt::DebugMarker>();
81 match debug_proxy {
82 Ok(proxy) => {
83 if let Err(e) = proxy.log_stack_trace(ev_dup).await {
84 log::warn!("failed to log stack trace: {:?}", e);
85 }
86 }
87 Err(e) => {
88 log::warn!("failed to connect to Debug protocol: {:?}", e);
89 }
90 }
91 } else {
92 log::warn!("DEBUG_STACK_TRACE_TOKEN not initialized, cannot log stack trace");
93 }
94}
95
/// Awaits `$fut`, warning every `TIMEOUT_SECONDS` until it resolves.
///
/// On the first timeout this also dumps local thread backtraces and asks the
/// hrtimer driver for its stack trace (see `request_stack_trace`). Evaluates
/// to the future's output, so it can wrap any `.await`-able expression.
macro_rules! log_long_op {
    ($fut:expr) => {{
        use futures::FutureExt;
        let fut = $fut;
        futures::pin_mut!(fut);
        // Whether we have already warned about this operation blocking.
        let mut logged = false;
        loop {
            let timeout = fasync::Timer::new(zx::MonotonicDuration::from_seconds(TIMEOUT_SECONDS));
            futures::select! {
                res = fut.as_mut().fuse() => {
                    // Only log resolution if we previously flagged a stall.
                    if logged {
                        log::warn!("unexpected blocking is now resolved: long-running async operation at {}:{}.",
                            file!(), line!());
                    }
                    break res;
                }
                _ = timeout.fuse() => {
                    log::warn!("unexpected blocking: long-running async op at {}:{}. Report to `componentId:1408151`",
                        file!(), line!());
                    if !logged {
                        // Capture backtraces once; do not spam on every
                        // subsequent timeout of the same operation.
                        #[cfg(all(target_os = "fuchsia", not(doc)))]
                        ::debug::backtrace_request_all_threads();
                        fasync::Task::local(request_stack_trace()).detach();
                    }
                    logged = true;
                }
            }
        }
    }};
}
129
/// RAII guard that increments an Inspect counter on creation and decrements
/// it on drop, so "currently executing" gauges stay accurate even on early
/// returns or panics unwinding through a scope.
struct ScopedInc<'a> {
    // The Inspect property being tracked.
    property: &'a IntProperty,
}

impl<'a> ScopedInc<'a> {
    /// Increments `property` and returns the guard that will undo it.
    fn new(property: &'a IntProperty) -> Self {
        property.add(1);
        Self { property }
    }
}

impl<'a> Drop for ScopedInc<'a> {
    fn drop(&mut self) {
        // Balance the increment performed in `new`.
        self.property.add(-1);
    }
}
147
/// Returns true if the effective deadline changed between `before` and
/// `after`.
///
/// `None` means "no deadline"; a transition between `None` and `Some`, or a
/// change of the inner value, counts as a change. Generic over the deadline
/// type so the same helper works for any comparable representation; existing
/// callers passing `Option<fasync::BootInstant>` are unaffected.
fn is_deadline_changed<T: PartialEq>(before: Option<T>, after: Option<T>) -> bool {
    // `Option<T>`'s PartialEq has exactly the semantics the previous match
    // spelled out: (None, None) equal, mixed variants unequal, and
    // (Some, Some) comparing the inner values.
    before != after
}
160
/// Errors a `TimerOps` operation can produce.
#[derive(Debug, Clone)]
pub(crate) enum TimerOpsError {
    /// The driver itself reported an error.
    Driver(ffhh::DriverError),
    /// The FIDL transport to the driver failed.
    Fidl(fidl::Error),
}
169
170impl Into<fta::WakeAlarmsError> for TimerOpsError {
171 fn into(self) -> fta::WakeAlarmsError {
174 match self {
175 TimerOpsError::Fidl(fidl::Error::ClientChannelClosed { .. }) => {
176 fta::WakeAlarmsError::DriverConnection
177 }
178 TimerOpsError::Driver(ffhh::DriverError::InternalError) => fta::WakeAlarmsError::Driver,
179 _ => fta::WakeAlarmsError::Internal,
180 }
181 }
182}
183
184impl TimerOpsError {
185 fn is_canceled(&self) -> bool {
186 match self {
187 TimerOpsError::Driver(ffhh::DriverError::Canceled) => true,
188 _ => false,
189 }
190 }
191}
192
/// Future returned by `TimerOps::start_and_wait`: resolves to the wake-lease
/// event pair once the timer fires, or to a `TimerOpsError`.
trait SawResponseFut: std::future::Future<Output = Result<zx::EventPair, TimerOpsError>> {
    }
196
/// Abstraction over the hardware timer, allowing the loop to run against
/// either the real driver (`HardwareTimerOps`) or an emulated one
/// (`EmulationTimerOps`).
#[async_trait(?Send)]
pub(crate) trait TimerOps {
    /// Stops the timer with the given `id`; implementations handle errors
    /// internally.
    async fn stop(&self, id: u64);

    /// Returns the timer's supported resolutions, maximum tick count and ID.
    async fn get_timer_properties(&self) -> TimerConfig;

    /// Arms timer `id` for `ticks` ticks at `resolution`, signaling
    /// `setup_event` once the timer is programmed; the returned future
    /// resolves when the timer expires.
    fn start_and_wait(
        &self,
        id: u64,
        resolution: &ffhh::Resolution,
        ticks: u64,
        setup_event: zx::Event,
    ) -> std::pin::Pin<Box<dyn SawResponseFut>>;
}
219
/// `TimerOps` implementation backed by the real hrtimer device driver.
struct HardwareTimerOps {
    // FIDL connection to the hrtimer device.
    proxy: ffhh::DeviceProxy,
}

impl HardwareTimerOps {
    /// Wraps `proxy` in a boxed `TimerOps` implementation.
    fn new(proxy: ffhh::DeviceProxy) -> Box<Self> {
        Box::new(Self { proxy })
    }
}
230
#[async_trait(?Send)]
impl TimerOps for HardwareTimerOps {
    /// Stops timer `id`. Best effort: both transport and driver errors are
    /// logged and otherwise ignored.
    async fn stop(&self, id: u64) {
        let _ = self
            .proxy
            .stop(id)
            .await
            .map(|result| {
                let _ = result.map_err(|e| warn!("stop_hrtimer: driver error: {:?}", e));
            })
            .map_err(|e| warn!("stop_hrtimer: could not stop prior timer: {}", e));
    }

    /// Queries the driver and distills its properties into a `TimerConfig`.
    /// Returns an empty config on any error or if no timers are reported.
    async fn get_timer_properties(&self) -> TimerConfig {
        match log_long_op!(self.proxy.get_properties()) {
            Ok(p) => {
                // Stash the driver node token so request_stack_trace can
                // authenticate stack-trace requests later on.
                if let Some(token) = p.driver_node_token {
                    let _ = DEBUG_STACK_TRACE_TOKEN.set(token);
                }
                let timers_properties = &p.timers_properties.expect("timers_properties must exist");
                debug!("get_timer_properties: got: {:?}", timers_properties);

                // Prefer MAIN_TIMER_ID; fall back to the first timer, and
                // bail out with an empty config if there are none at all.
                let timer_index = if timers_properties.len() > MAIN_TIMER_ID {
                    MAIN_TIMER_ID
                } else if timers_properties.len() > 0 {
                    0
                } else {
                    return TimerConfig::new_empty();
                };
                let main_timer_properties = &timers_properties[timer_index];
                debug!("alarms: main_timer_properties: {:?}", main_timer_properties);
                // Clamp the driver's max tick count to what we consider useful.
                let max_ticks: u64 = std::cmp::min(
                    main_timer_properties.max_ticks.unwrap_or(*MAX_USEFUL_TICKS),
                    *MAX_USEFUL_TICKS,
                );
                // Only the *last* reported resolution is kept.
                // NOTE(review): this assumes the driver orders resolutions so
                // the last is the one we want — confirm against the driver.
                let resolutions = &main_timer_properties
                    .supported_resolutions
                    .as_ref()
                    .expect("supported_resolutions is populated")
                    .iter()
                    .last() .map(|r| match *r {
                        ffhh::Resolution::Duration(d) => d,
                        _ => {
                            error!(
                                "get_timer_properties: Unknown resolution type, returning millisecond."
                            );
                            MSEC_IN_NANOS
                        }
                    })
                    .map(|d| zx::BootDuration::from_nanos(d))
                    .into_iter() .collect::<Vec<_>>();
                let timer_id = main_timer_properties.id.expect("timer ID is always present");
                TimerConfig::new_from_data(timer_id, resolutions, max_ticks)
            }
            Err(e) => {
                error!("could not get timer properties: {:?}", e);
                TimerConfig::new_empty()
            }
        }
    }

    /// Forwards to the driver's StartAndWait call, wrapping the FIDL future
    /// so its result is expressed as `Result<_, TimerOpsError>`.
    fn start_and_wait(
        &self,
        id: u64,
        resolution: &ffhh::Resolution,
        ticks: u64,
        setup_event: zx::Event,
    ) -> std::pin::Pin<Box<dyn SawResponseFut>> {
        let inner = self.proxy.start_and_wait(id, resolution, ticks, setup_event);
        Box::pin(HwResponseFut { pinner: Box::pin(inner) })
    }
}
313
/// Concrete `SawResponseFut` wrapping the in-flight StartAndWait FIDL call
/// to the hrtimer driver.
struct HwResponseFut {
    // The pending FIDL response future, pinned so it can be polled in place.
    pinner: std::pin::Pin<
        Box<
            fidl::client::QueryResponseFut<
                ffhh::DeviceStartAndWaitResult,
                fidl::encoding::DefaultFuchsiaResourceDialect,
            >,
        >,
    >,
}
326
327use std::task::Poll;
328impl SawResponseFut for HwResponseFut {}
329impl std::future::Future for HwResponseFut {
330 type Output = Result<zx::EventPair, TimerOpsError>;
331 fn poll(
332 mut self: std::pin::Pin<&mut Self>,
333 cx: &mut std::task::Context<'_>,
334 ) -> std::task::Poll<Self::Output> {
335 let inner_poll = self.pinner.as_mut().poll(cx);
336 match inner_poll {
337 Poll::Ready(result) => Poll::Ready(match result {
338 Ok(Ok(keep_alive)) => Ok(keep_alive),
339 Ok(Err(e)) => Err(TimerOpsError::Driver(e)),
340 Err(e) => Err(TimerOpsError::Fidl(e)),
341 }),
342 Poll::Pending => Poll::Pending,
343 }
344 }
345}
346
347async fn stop_hrtimer(hrtimer: &Box<dyn TimerOps>, timer_config: &TimerConfig) {
349 trace::duration!("alarms", "hrtimer:stop", "id" => timer_config.id);
350 debug!("stop_hrtimer: stopping hardware timer: {}", timer_config.id);
351 hrtimer.stop(timer_config.id).await;
352 debug!("stop_hrtimer: stopped hardware timer: {}", timer_config.id);
353}
354
// Capacity of the command channel feeding the wake timer loop.
const CHANNEL_SIZE: usize = 1000;
358
/// Commands processed by `wake_timer_loop`.
#[derive(Debug)]
enum Cmd {
    /// Schedule a new alarm.
    Start {
        // Koid of the client connection that owns the alarm.
        conn_id: zx::Koid,
        // Boot-time or UTC deadline for the alarm.
        deadline: timers::Deadline,
        // Optional setup-notification mode (e.g. an event to signal once armed).
        mode: Option<fta::SetMode>,
        // Client-chosen alarm name.
        alarm_id: String,
        // Where to deliver the alarm (or an error) when it fires.
        responder: Rc<dyn timers::Responder>,
    },
    /// Cancel the alarm with the given ID; `done` is signaled when the
    /// cancellation has been fully processed.
    StopById {
        done: zx::Event,
        timer_id: timers::Id,
    },
    /// The hardware timer fired; `keep_alive` carries the wake lease.
    Alarm {
        expired_deadline: fasync::BootInstant,
        keep_alive: fidl::EventPair,
    },
    /// The hardware wait failed at the FIDL layer (no wake lease available).
    AlarmFidlError {
        expired_deadline: fasync::BootInstant,
        error: fidl::Error,
    },
    /// The hardware wait failed inside the driver (no wake lease available).
    AlarmDriverError {
        expired_deadline: fasync::BootInstant,
        error: ffhh::DriverError,

        // Parameters the timer was programmed with, kept for diagnostics.
        timer_config_id: u64,
        resolution_nanos: i64,
        ticks: u64,
    },
    /// The UTC clock was updated; carries the new boot-to-UTC transform.
    UtcUpdated {
        transform: fxr::UtcClockTransform,
    },
}
415
416impl std::fmt::Display for Cmd {
417 fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
418 match self {
419 Cmd::Start { conn_id, deadline, alarm_id, .. } => {
420 write!(
421 f,
422 "Start[alarm_id=\"{}\", conn_id={:?}, deadline={}]",
423 alarm_id, conn_id, deadline,
424 )
425 }
426 Cmd::Alarm { expired_deadline, .. } => {
427 write!(f, "Alarm[deadline={}]", format_timer((*expired_deadline).into()))
428 }
429 Cmd::AlarmFidlError { expired_deadline, error } => {
430 write!(
431 f,
432 "FIDLError[deadline={}, err={}, NO_WAKE_LEASE!]",
433 format_timer((*expired_deadline).into()),
434 error
435 )
436 }
437 Cmd::AlarmDriverError { expired_deadline, error, .. } => {
438 write!(
439 f,
440 "DriverError[deadline={}, err={:?}, NO_WAKE_LEASE!]",
441 format_timer((*expired_deadline).into()),
442 error
443 )
444 }
445 Cmd::StopById { timer_id, done: _ } => {
446 write!(f, "StopById[timerId={}]", timer_id,)
447 }
448 Cmd::UtcUpdated { transform } => {
449 write!(f, "UtcUpdated[timerId={transform:?}]")
450 }
451 }
452 }
453}
454
455pub fn get_stream_koid(
468 stream: fta::WakeAlarmsRequestStream,
469) -> (zx::Koid, fta::WakeAlarmsRequestStream) {
470 let (inner, is_terminated) = stream.into_inner();
471 let koid = inner.channel().as_channel().as_handle_ref().koid().expect("infallible");
472 let stream = fta::WakeAlarmsRequestStream::from_inner(inner, is_terminated);
473 (koid, stream)
474}
475
476pub async fn serve(timer_loop: Rc<Loop>, requests: fta::WakeAlarmsRequestStream) {
486 let timer_loop = timer_loop.clone();
487 let timer_loop_send = || timer_loop.get_sender();
488 let (conn_id, mut requests) = get_stream_koid(requests);
489 let mut request_count = 0;
490 debug!("alarms::serve: opened connection: {:?}", conn_id);
491 while let Some(maybe_request) = requests.next().await {
492 request_count += 1;
493 debug!("alarms::serve: conn_id: {:?} incoming request: {}", conn_id, request_count);
494 match maybe_request {
495 Ok(request) => {
496 handle_request(conn_id, timer_loop_send(), request).await;
498 }
499 Err(e) => {
500 warn!("alarms::serve: error in request: {:?}", e);
501 }
502 }
503 debug!("alarms::serve: conn_id: {:?} done request: {}", conn_id, request_count);
504 }
505 warn!("alarms::serve: CLOSED CONNECTION: conn_id: {:?}", conn_id);
508}
509
510async fn handle_cancel(alarm_id: String, conn_id: zx::Koid, cmd: &mut mpsc::Sender<Cmd>) {
511 let done = zx::Event::create();
512 let timer_id = timers::Id::new(alarm_id.clone(), conn_id);
513 if let Err(e) = cmd.send(Cmd::StopById { timer_id, done: clone_handle(&done) }).await {
514 warn!("handle_request: error while trying to cancel: {}: {:?}", alarm_id, e);
515 }
516 wait_signaled(&done).await;
517}
518
/// Routes one incoming FIDL request from connection `conn_id` into the timer
/// loop via `cmd`.
///
/// If a Start command cannot be delivered to the loop, the error is reported
/// back to the client as `fta::WakeAlarmsError::Internal`.
async fn handle_request(
    conn_id: zx::Koid,
    mut cmd: mpsc::Sender<Cmd>,
    request: fta::WakeAlarmsRequest,
) {
    match request {
        fta::WakeAlarmsRequest::SetAndWait { deadline, mode, alarm_id, responder } => {
            // Shared with the timer loop; the error path below takes it back
            // if the Start command cannot be delivered.
            let responder = Rc::new(RefCell::new(Some(responder)));

            debug!(
                "handle_request: scheduling alarm_id: \"{}\"\n\tconn_id: {:?}\n\tdeadline: {}",
                alarm_id,
                conn_id,
                format_timer(deadline.into())
            );
            let deadline = timers::Deadline::Boot(deadline.into());
            if let Err(e) = log_long_op!(cmd.send(Cmd::Start {
                conn_id,
                deadline,
                mode: Some(mode),
                alarm_id: alarm_id.clone(),
                responder: responder.clone(),
            })) {
                warn!("handle_request: error while trying to schedule `{}`: {:?}", alarm_id, e);
                responder
                    .borrow_mut()
                    .take()
                    .expect("always present if call fails")
                    .send(Err(fta::WakeAlarmsError::Internal))
                    .unwrap();
            }
        }
        fta::WakeAlarmsRequest::SetAndWaitUtc { deadline, mode, alarm_id, responder } => {
            // UTC deadlines are converted to boot time inside the loop,
            // tracking UTC transform updates as they happen.
            let deadline =
                timers::Deadline::Utc(fxr::UtcInstant::from_nanos(deadline.timestamp_utc));

            let responder = Rc::new(RefCell::new(Some(responder)));
            debug!(
                "handle_request: scheduling alarm_id UTC: \"{alarm_id}\"\n\tconn_id: {conn_id:?}\n\tdeadline: {deadline}",
            );

            if let Err(e) = log_long_op!(cmd.send(Cmd::Start {
                conn_id,
                deadline,
                mode: Some(mode),
                alarm_id: alarm_id.clone(),
                responder: responder.clone(),
            })) {
                warn!("handle_request: error while trying to schedule `{}`: {:?}", alarm_id, e);
                responder
                    .borrow_mut()
                    .take()
                    .expect("always present if call fails")
                    .send(Err(fta::WakeAlarmsError::Internal))
                    .unwrap();
            }
        }
        fta::WakeAlarmsRequest::Cancel { alarm_id, .. } => {
            // Blocks until the loop confirms the alarm is gone.
            log_long_op!(handle_cancel(alarm_id, conn_id, &mut cmd));
        }
        fta::WakeAlarmsRequest::Set { notifier, deadline, mode, alarm_id, responder } => {
            debug!(
                "handle_request: scheduling alarm_id: \"{alarm_id}\"\n\tconn_id: {conn_id:?}\n\tdeadline: {}",
                format_timer(deadline.into())
            );
            if let Err(e) = log_long_op!(cmd.send(Cmd::Start {
                conn_id,
                deadline: timers::Deadline::Boot(deadline.into()),
                mode: Some(mode),
                alarm_id: alarm_id.clone(),
                // The alarm itself is delivered through `notifier`; the
                // `responder` below only acknowledges this Set call.
                responder: Rc::new(RefCell::new(Some(notifier))),
            })) {
                warn!("handle_request: error while trying to schedule `{}`: {:?}", alarm_id, e);
                responder.send(Err(fta::WakeAlarmsError::Internal)).unwrap();
            } else {
                responder.send(Ok(())).unwrap();
            }
        }
        fta::WakeAlarmsRequest::_UnknownMethod { .. } => {}
    };
}
626
/// The wake-alarm manager: owns the command channel into the timer loop
/// spawned by `new`/`new_emulated`.
pub struct Loop {
    // Sender side of the command channel consumed by `wake_timer_loop`.
    snd: mpsc::Sender<Cmd>,
}
638
639impl Loop {
640 pub fn new(
655 scope: fasync::ScopeHandle,
656 device_proxy: ffhh::DeviceProxy,
657 inspect: finspect::Node,
658 utc_clock: fxr::UtcClock,
659 ) -> Self {
660 let hw_device_timer_ops = HardwareTimerOps::new(device_proxy);
661 Loop::new_internal(scope, hw_device_timer_ops, inspect, utc_clock)
662 }
663
664 pub fn new_emulated(
678 scope: fasync::ScopeHandle,
679 inspect: finspect::Node,
680 utc_clock: fxr::UtcClock,
681 ) -> Self {
682 let timer_ops = Box::new(EmulationTimerOps::new());
683 Loop::new_internal(scope, timer_ops, inspect, utc_clock)
684 }
685
686 fn new_internal(
687 scope: fasync::ScopeHandle,
688 timer_ops: Box<dyn TimerOps>,
689 inspect: finspect::Node,
690 utc_clock: fxr::UtcClock,
691 ) -> Self {
692 let utc_transform = Rc::new(RefCell::new(
693 utc_clock.get_details().expect("has UTC clock READ capability").reference_to_synthetic,
694 ));
695
696 let (snd, rcv) = mpsc::channel(CHANNEL_SIZE);
697 let loop_scope = scope.clone();
698
699 scope.spawn_local(wake_timer_loop(
700 loop_scope,
701 snd.clone(),
702 rcv,
703 timer_ops,
704 inspect,
705 utc_transform,
706 ));
707 scope.spawn_local(monitor_utc_clock_changes(utc_clock, snd.clone()));
708 Self { snd }
709 }
710
711 fn get_sender(&self) -> mpsc::Sender<Cmd> {
714 self.snd.clone()
715 }
716}
717
718async fn monitor_utc_clock_changes(utc_clock: fxr::UtcClock, mut cmd: mpsc::Sender<Cmd>) {
721 let koid = utc_clock.as_handle_ref().koid();
722 log::info!("monitor_utc_clock_changes: entry");
723 loop {
724 fasync::OnSignals::new(utc_clock.as_handle_ref(), zx::Signals::CLOCK_UPDATED)
726 .await
727 .expect("UTC clock is readable");
728
729 let transform =
730 utc_clock.get_details().expect("UTC clock details are readable").reference_to_synthetic;
731 log::debug!("Received a UTC update: koid={koid:?}: {transform:?}");
732 if let Err(err) = cmd.send(Cmd::UtcUpdated { transform }).await {
733 log::warn!("monitor_utc_clock_changes: exit: {err:?}");
735 break;
736 }
737 }
738}
739
/// Duplicates `handle` with identical rights. Panics on duplication failure,
/// which would indicate a rights or kernel problem rather than a recoverable
/// error.
pub fn clone_handle<H: HandleBased>(handle: &H) -> H {
    handle.duplicate_handle(zx::Rights::SAME_RIGHTS).expect("infallible")
}
754
/// Waits until `handle` has EVENT_SIGNALED asserted (see `signal`).
async fn wait_signaled<H: HandleBased>(handle: &H) {
    fasync::OnSignals::new(handle, zx::Signals::EVENT_SIGNALED).await.expect("infallible");
}
758
/// Asserts EVENT_SIGNALED on `event`, waking any `wait_signaled` callers.
pub(crate) fn signal(event: &zx::Event) {
    event.signal(zx::Signals::NONE, zx::Signals::EVENT_SIGNALED).expect("infallible");
}
762
/// A duration expressed as a number of hardware timer ticks at a given
/// resolution (total duration = resolution * ticks).
#[derive(Debug, Clone, Copy)]
struct TimerDuration {
    // Length of a single tick.
    resolution: zx::BootDuration,
    // Number of ticks.
    ticks: u64,
}
778
779impl Eq for TimerDuration {}
782
783impl std::cmp::PartialOrd for TimerDuration {
784 fn partial_cmp(&self, other: &TimerDuration) -> Option<std::cmp::Ordering> {
785 Some(self.cmp(other))
786 }
787}
788
789impl std::cmp::PartialEq for TimerDuration {
790 fn eq(&self, other: &Self) -> bool {
791 self.cmp(other) == std::cmp::Ordering::Equal
792 }
793}
794
795impl std::cmp::Ord for TimerDuration {
796 fn cmp(&self, other: &TimerDuration) -> std::cmp::Ordering {
799 let self_ticks_128: i128 = self.ticks as i128;
800 let self_resolution: i128 = self.resolution_as_nanos() as i128;
801 let self_nanos = self_resolution * self_ticks_128;
802
803 let other_ticks_128: i128 = other.ticks as i128;
804 let other_resolution: i128 = other.resolution_as_nanos() as i128;
805 let other_nanos = other_resolution * other_ticks_128;
806
807 self_nanos.cmp(&other_nanos)
808 }
809}
810
811impl std::fmt::Display for TimerDuration {
812 fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
816 let ticks = self.ticks;
817 let resolution = self.resolution();
818 write!(f, "{}x{}", ticks, format_duration(resolution),)
820 }
821}
822
impl TimerDuration {
    /// The longest representable TimerDuration: i64::MAX ticks at 1ns.
    fn max() -> Self {
        TimerDuration::new(zx::BootDuration::from_nanos(1), *I64_MAX_AS_U64)
    }

    /// The zero duration (0 ticks at 1ns).
    fn zero() -> Self {
        TimerDuration::new(zx::BootDuration::from_nanos(1), 0)
    }

    /// Creates a duration of `ticks` ticks, each `resolution` long.
    fn new(resolution: zx::BootDuration, ticks: u64) -> Self {
        Self { resolution, ticks }
    }

    /// New value with `res_source`'s resolution and the given tick count.
    fn new_with_resolution(res_source: &TimerDuration, ticks: u64) -> Self {
        Self::new(res_source.resolution, ticks)
    }

    /// Total duration, clamped.
    ///
    /// NOTE(review): the clamp bound is I32_MAX_AS_U64 nanoseconds (~2.1s)
    /// while `max()` allows i64::MAX ns — confirm the i32 bound here is
    /// intentional and not meant to be I64_MAX_AS_U64.
    fn duration(&self) -> zx::BootDuration {
        let duration_as_nanos = self.resolution_as_nanos() * self.ticks;
        let clamp_duration = std::cmp::min(*I32_MAX_AS_U64, duration_as_nanos);
        zx::BootDuration::from_nanos(clamp_duration.try_into().expect("result was clamped"))
    }

    /// Length of one tick.
    fn resolution(&self) -> zx::BootDuration {
        self.resolution
    }

    /// Resolution in nanoseconds; non-negative by construction.
    fn resolution_as_nanos(&self) -> u64 {
        self.resolution().into_nanos().try_into().expect("resolution is never negative")
    }

    /// Number of ticks.
    fn ticks(&self) -> u64 {
        self.ticks
    }
}
869
870impl From<zx::BootDuration> for TimerDuration {
871 fn from(d: zx::BootDuration) -> TimerDuration {
872 let nanos = d.into_nanos();
873 assert!(nanos >= 0);
874 let nanos_u64 = nanos.try_into().expect("guarded by assert");
875 TimerDuration::new(zx::BootDuration::from_nanos(1), nanos_u64)
876 }
877}
878
879impl std::ops::Div for TimerDuration {
880 type Output = u64;
881 fn div(self, rhs: Self) -> Self::Output {
882 let self_nanos = self.resolution_as_nanos() * self.ticks;
883 let rhs_nanos = rhs.resolution_as_nanos() * rhs.ticks;
884 self_nanos / rhs_nanos
885 }
886}
887
888impl std::ops::Mul<u64> for TimerDuration {
889 type Output = Self;
890 fn mul(self, rhs: u64) -> Self::Output {
891 Self::new(self.resolution, self.ticks * rhs)
892 }
893}
894
/// Static properties of a hardware timer, as reported by its driver.
#[derive(Debug)]
pub(crate) struct TimerConfig {
    // Tick resolutions the timer supports.
    resolutions: Vec<zx::BootDuration>,
    // Largest tick count that can be programmed.
    max_ticks: u64,
    // Driver-assigned timer ID.
    id: u64,
}
914
impl TimerConfig {
    /// Creates a TimerConfig from driver-reported data.
    fn new_from_data(timer_id: u64, resolutions: &[zx::BootDuration], max_ticks: u64) -> Self {
        debug!(
            "TimerConfig: resolutions: {:?}, max_ticks: {}, timer_id: {}",
            resolutions.iter().map(|r| format_duration(*r)).collect::<Vec<_>>(),
            max_ticks,
            timer_id
        );
        let resolutions = resolutions.iter().map(|d| *d).collect::<Vec<zx::BootDuration>>();
        TimerConfig { resolutions, max_ticks, id: timer_id }
    }

    /// Fallback config used when the driver reports nothing usable.
    fn new_empty() -> Self {
        error!("TimerConfig::new_empty() called, this is not OK.");
        TimerConfig { resolutions: vec![], max_ticks: 0, id: 0 }
    }

    /// Picks the (resolution, ticks) setting that best approximates
    /// `duration`.
    ///
    /// An exact representation is returned immediately. Otherwise the search
    /// tracks the closest setting that undershoots ("positive slack", fires
    /// early) and the closest that overshoots ("negative slack", fires
    /// late), preferring to fire early.
    fn pick_setting(&self, duration: zx::BootDuration) -> TimerDuration {
        assert!(self.resolutions.len() > 0, "there must be at least one supported resolution");

        // Nonpositive durations map to the shortest possible timer setting.
        if duration <= zx::BootDuration::ZERO {
            return TimerDuration::new(self.resolutions[0], 1);
        }

        // Best candidate that undershoots `duration` (fires early)...
        let mut best_positive_slack = TimerDuration::zero();
        // ...and best candidate that overshoots it (fires late).
        let mut best_negative_slack = TimerDuration::max();

        if self.max_ticks == 0 {
            return TimerDuration::new(zx::BootDuration::from_millis(1), 0);
        }
        // The target, expressed in TimerDuration space for comparisons.
        let duration_slack: TimerDuration = duration.into();

        for res1 in self.resolutions.iter() {
            let smallest_unit = TimerDuration::new(*res1, 1);
            let max_tick_at_res = TimerDuration::new(*res1, self.max_ticks);

            // At this resolution, even one tick overshoots the target...
            let smallest_slack_larger_than_duration = smallest_unit > duration_slack;
            // ...or even the maximum tick count undershoots it.
            let largest_slack_smaller_than_duration = max_tick_at_res < duration_slack;

            if smallest_slack_larger_than_duration {
                if duration_slack == TimerDuration::zero() {
                    best_negative_slack = TimerDuration::zero();
                } else if smallest_unit < best_negative_slack {
                    best_negative_slack = smallest_unit;
                }
            }
            if largest_slack_smaller_than_duration {
                if max_tick_at_res > best_positive_slack
                    || best_positive_slack == TimerDuration::zero()
                {
                    best_positive_slack = max_tick_at_res;
                }
            }

            // Otherwise the target lies within this resolution's range:
            // consider the tick counts just below and just above it.
            if !smallest_slack_larger_than_duration && !largest_slack_smaller_than_duration {
                let q = duration_slack / smallest_unit;
                let d = smallest_unit * q;
                if d == duration_slack {
                    // Exact representation: no better setting can exist.
                    return d;
                } else {
                    if d > best_positive_slack {
                        best_positive_slack = TimerDuration::new_with_resolution(&smallest_unit, q);
                    }
                    let d_plus = TimerDuration::new_with_resolution(&smallest_unit, q + 1);
                    if d_plus < best_negative_slack {
                        best_negative_slack = d_plus;
                    }
                }
            }
        }

        // Slack actually incurred by each remaining candidate.
        let p_slack = duration - best_positive_slack.duration();
        let n_slack = best_negative_slack.duration() - duration;

        // Prefer firing early, unless the early candidate is degenerate
        // (zero duration) or the late one is strictly closer.
        let ret = if p_slack < n_slack && best_positive_slack.duration().into_nanos() > 0 {
            best_positive_slack
        } else {
            best_negative_slack
        };
        debug!("TimerConfig: picked slack: {} for duration: {}", ret, format_duration(duration));
        assert!(
            ret.duration().into_nanos() >= 0,
            "ret: {}, p_slack: {}, n_slack: {}, orig.duration: {}\n\tbest_p_slack: {}\n\tbest_n_slack: {}\n\ttarget: {}\n\t 1: {} 2: {:?}, 3: {:?}",
            ret,
            format_duration(p_slack),
            format_duration(n_slack),
            format_duration(duration),
            best_positive_slack,
            best_negative_slack,
            duration_slack,
            p_slack != zx::BootDuration::ZERO,
            p_slack,
            zx::BootDuration::ZERO,
        );
        ret
    }
}
1040
1041async fn get_timer_properties(hrtimer: &Box<dyn TimerOps>) -> TimerConfig {
1042 debug!("get_timer_properties: requesting timer properties.");
1043 hrtimer.get_timer_properties().await
1044}
1045
/// State of the currently programmed hardware timer.
struct TimerState {
    // Task waiting for the hardware timer to expire.
    task: fasync::Task<()>,
    // Boot deadline the hardware timer was programmed for.
    deadline: fasync::BootInstant,
}
1053
1054async fn wake_timer_loop(
1063 scope: fasync::ScopeHandle,
1064 snd: mpsc::Sender<Cmd>,
1065 mut cmds: mpsc::Receiver<Cmd>,
1066 timer_proxy: Box<dyn TimerOps>,
1067 inspect: finspect::Node,
1068 utc_transform: Rc<RefCell<fxr::UtcClockTransform>>,
1069) {
1070 debug!("wake_timer_loop: started");
1071
1072 let mut timers = timers::Heap::new(utc_transform.clone());
1073 let timer_config = get_timer_properties(&timer_proxy).await;
1074
1075 #[allow(clippy::collection_is_never_read)]
1078 let mut hrtimer_status: Option<TimerState> = None;
1079
1080 let now_prop = inspect.create_int("now_ns", 0);
1087 let now_formatted_prop = inspect.create_string("now_formatted", "");
1088 let pending_timers_count_prop = inspect.create_uint("pending_timers_count", 0);
1089 let pending_timers_prop = inspect.create_string("pending_timers", "");
1090 let _deadline_histogram_prop = inspect.create_int_exponential_histogram(
1091 "requested_deadlines_ns",
1092 finspect::ExponentialHistogramParams {
1093 floor: 0,
1094 initial_step: zx::BootDuration::from_micros(1).into_nanos(),
1095 step_multiplier: 10,
1097 buckets: 16,
1098 },
1099 );
1100 let slack_histogram_prop = inspect.create_int_exponential_histogram(
1101 "slack_ns",
1102 finspect::ExponentialHistogramParams {
1103 floor: 0,
1104 initial_step: zx::BootDuration::from_micros(1).into_nanos(),
1105 step_multiplier: 10,
1106 buckets: 16,
1107 },
1108 );
1109 let schedule_delay_prop = inspect.create_int_exponential_histogram(
1110 "schedule_delay_ns",
1111 finspect::ExponentialHistogramParams {
1112 floor: 0,
1113 initial_step: zx::BootDuration::from_micros(1).into_nanos(),
1114 step_multiplier: 10,
1115 buckets: 16,
1116 },
1117 );
1118 let boot_deadlines_count_prop = inspect.create_uint("boot_deadlines_count", 0);
1119 let utc_deadlines_count_prop = inspect.create_uint("utc_deadlines_count", 0);
1120 let hw_node = inspect.create_child("hardware");
1122 let current_hw_deadline_prop = hw_node.create_string("current_deadline", "");
1123 let remaining_until_alarm_prop = hw_node.create_string("remaining_until_alarm", "");
1124
1125 let debug_node = inspect.create_child("debug_node");
1127 let start_notify_setup_count = debug_node.create_int("start_notify_setup", 0);
1128 let start_count = debug_node.create_int("start_count", 0);
1129 let responder_count = debug_node.create_int("responder_count", 0);
1130 let stop_count = debug_node.create_int("stop", 0);
1131 let stop_responder_count = debug_node.create_int("stop_responder", 0);
1132 let stop_hrtimer_count = debug_node.create_int("stop_hrtimer", 0);
1133 let schedule_hrtimer_count = debug_node.create_int("schedule_hrtimer", 0);
1134 let alarm_count = debug_node.create_int("alarm", 0);
1135 let alarm_fidl_count = debug_node.create_int("alarm_fidl", 0);
1136 let alarm_driver_count = debug_node.create_int("alarm_driver", 0);
1137 let utc_update_count = debug_node.create_int("utc_update", 0);
1138 let status_count = debug_node.create_int("status", 0);
1139 let loop_count = debug_node.create_int("loop_count", 0);
1140
1141 let hrtimer_node = debug_node.create_child("hrtimer");
1142
1143 const LRU_CACHE_CAPACITY: usize = 100;
1144 let mut error_cache = lru_cache::LruCache::new(LRU_CACHE_CAPACITY);
1145
1146 while let Some(cmd) = cmds.next().await {
1147 let _i = ScopedInc::new(&loop_count);
1148 trace::duration!("alarms", "Cmd");
1149 let now = fasync::BootInstant::now();
1151 now_prop.set(now.into_nanos());
1152 trace::instant!("alarms", "wake_timer_loop", trace::Scope::Process, "now" => now.into_nanos());
1153 match cmd {
1154 Cmd::Start { conn_id, deadline, mode, alarm_id, responder } => {
1155 let _i = ScopedInc::new(&start_count);
1156 trace::duration!("alarms", "Cmd::Start");
1157 fuchsia_trace::flow_step!(
1158 "alarms",
1159 "hrtimer_lifecycle",
1160 timers::get_trace_id(&alarm_id)
1161 );
1162 debug!(
1164 "wake_timer_loop: START alarm_id: \"{}\", conn_id: {:?}\n\tdeadline: {}\n\tnow: {}",
1165 alarm_id,
1166 conn_id,
1167 deadline,
1168 format_timer(now.into()),
1169 );
1170
1171 defer! {
1172 let _i = ScopedInc::new(&start_notify_setup_count);
1173 if let Some(mode) = mode {
1175 if let fta::SetMode::NotifySetupDone(setup_done) = mode {
1176 signal(&setup_done);
1178 debug!("wake_timer_loop: START: setup_done signaled");
1179 };
1180 }
1181 }
1182 let deadline_boot = deadline.as_boot(&*utc_transform.borrow());
1183
1184 match deadline {
1188 timers::Deadline::Boot(_) => boot_deadlines_count_prop.add(1),
1189 timers::Deadline::Utc(_) => utc_deadlines_count_prop.add(1),
1190 };
1191
1192 if timers::Heap::expired(now, deadline_boot) {
1193 trace::duration!("alarms", "Cmd::Start:immediate");
1194 fuchsia_trace::flow_step!(
1195 "alarms",
1196 "hrtimer_lifecycle",
1197 timers::get_trace_id(&alarm_id)
1198 );
1199 let (_lease, keep_alive) = zx::EventPair::create();
1201 debug!(
1202 "[{}] wake_timer_loop: bogus lease {:?}",
1203 line!(),
1204 &keep_alive.koid().unwrap()
1205 );
1206
1207 {
1208 let _i1 = ScopedInc::new(&responder_count);
1209 if let Err(e) = responder
1210 .send(&alarm_id, Ok(keep_alive))
1211 .expect("responder is always present")
1212 {
1213 error!(
1214 "wake_timer_loop: conn_id: {conn_id:?}, alarm: {alarm_id}: could not notify, dropping: {e}",
1215 );
1216 } else {
1217 debug!(
1218 "wake_timer_loop: conn_id: {conn_id:?}, alarm: {alarm_id}: EXPIRED IMMEDIATELY\n\tdeadline({}) <= now({})\n\tfull deadline: {}",
1219 format_timer(deadline_boot.into()),
1220 format_timer(now.into()),
1221 deadline,
1222 )
1223 }
1224 }
1225 } else {
1226 trace::duration!("alarms", "Cmd::Start:regular");
1227 fuchsia_trace::flow_step!(
1228 "alarms",
1229 "hrtimer_lifecycle",
1230 timers::get_trace_id(&alarm_id)
1231 );
1232 let was_empty = timers.is_empty();
1234
1235 let deadline_before = timers.peek_deadline_as_boot();
1236 let node = match deadline {
1237 timers::Deadline::Boot(_) => {
1238 timers.new_node_boot(deadline_boot, alarm_id, conn_id, responder)
1239 }
1240 timers::Deadline::Utc(d) => {
1241 timers.new_node_utc(d, alarm_id, conn_id, responder)
1242 }
1243 };
1244 timers.push(node);
1245 let deadline_after = timers.peek_deadline_as_boot();
1246
1247 let deadline_changed = is_deadline_changed(deadline_before, deadline_after);
1248 let needs_cancel = !was_empty && deadline_changed;
1249 let needs_reschedule = was_empty || deadline_changed;
1250
1251 if needs_reschedule {
1252 let schedulable_deadline = deadline_after.unwrap_or(deadline_boot);
1254 if needs_cancel {
1255 log_long_op!(stop_hrtimer(&timer_proxy, &timer_config));
1256 }
1257 hrtimer_status = Some(
1258 schedule_hrtimer(
1259 scope.clone(),
1260 now,
1261 &timer_proxy,
1262 schedulable_deadline,
1263 snd.clone(),
1264 &timer_config,
1265 &schedule_delay_prop,
1266 &hrtimer_node,
1267 )
1268 .await,
1269 );
1270 }
1271 }
1272 }
1273 Cmd::StopById { timer_id, done } => {
1274 let _i = ScopedInc::new(&stop_count);
1275 defer! {
1276 signal(&done);
1277 }
1278 trace::duration!("alarms", "Cmd::StopById", "alarm_id" => timer_id.alarm());
1279 fuchsia_trace::flow_step!(
1280 "alarms",
1281 "hrtimer_lifecycle",
1282 timers::get_trace_id(&timer_id.alarm())
1283 );
1284 debug!("wake_timer_loop: STOP timer: {}", timer_id);
1285 let deadline_before = timers.peek_deadline_as_boot();
1286
1287 if let Some(timer_node) = timers.remove_by_id(&timer_id) {
1288 let deadline_after = timers.peek_deadline_as_boot();
1289
1290 {
1291 let _i = ScopedInc::new(&stop_responder_count);
1292 if let Some(res) = timer_node
1293 .get_responder()
1294 .send(timer_node.id().alarm(), Err(fta::WakeAlarmsError::Dropped))
1295 {
1296 res.expect("infallible");
1298 }
1299 }
1300 if is_deadline_changed(deadline_before, deadline_after) {
1301 let _i = ScopedInc::new(&stop_hrtimer_count);
1302 log_long_op!(stop_hrtimer(&timer_proxy, &timer_config));
1303 }
1304 if let Some(deadline) = deadline_after {
1305 let _i = ScopedInc::new(&schedule_hrtimer_count);
1306 let new_timer_state = schedule_hrtimer(
1309 scope.clone(),
1310 now,
1311 &timer_proxy,
1312 deadline,
1313 snd.clone(),
1314 &timer_config,
1315 &schedule_delay_prop,
1316 &hrtimer_node,
1317 )
1318 .await;
1319 let old_hrtimer_status = hrtimer_status.replace(new_timer_state);
1320 if let Some(task) = old_hrtimer_status.map(|ev| ev.task) {
1321 log_long_op!(task);
1325 }
1326 } else {
1327 hrtimer_status = None;
1329 }
1330 } else {
1331 debug!("wake_timer_loop: STOP: no active timer to stop: {}", timer_id);
1332 }
1333 }
1334 Cmd::Alarm { expired_deadline, keep_alive } => {
1335 let _i = ScopedInc::new(&alarm_count);
1336
1337 trace::duration!("alarms", "Cmd::Alarm");
1338 debug!(
1343 "wake_timer_loop: ALARM!!! reached deadline: {}, wakey-wakey! {:?}",
1344 format_timer(expired_deadline.into()),
1345 keep_alive.koid().unwrap(),
1346 );
1347 let expired_count =
1348 notify_all(&mut timers, &keep_alive, now, None, &slack_histogram_prop)
1349 .expect("notification succeeds");
1350 if expired_count == 0 {
1351 debug!("wake_timer_loop: no expired alarms, reset hrtimer state");
1354 log_long_op!(stop_hrtimer(&timer_proxy, &timer_config));
1355 }
1356 hrtimer_status = match timers.peek_deadline_as_boot() {
1358 None => None,
1359 Some(deadline) => Some(
1360 schedule_hrtimer(
1361 scope.clone(),
1362 now,
1363 &timer_proxy,
1364 deadline,
1365 snd.clone(),
1366 &timer_config,
1367 &schedule_delay_prop,
1368 &hrtimer_node,
1369 )
1370 .await,
1371 ),
1372 }
1373 }
1374 Cmd::AlarmFidlError { expired_deadline, error } => {
1375 let _i = ScopedInc::new(&alarm_fidl_count);
1376
1377 trace::duration!("alarms", "Cmd::AlarmFidlError");
1378 let error_string = format!("{}", error);
1382 if !error_cache.contains_key(&error_string) {
1383 warn!(
1384 "wake_timer_loop: FIDL error: {}, deadline: {}, now: {}",
1385 error,
1386 format_timer(expired_deadline.into()),
1387 format_timer(now.into()),
1388 );
1389 error_cache.insert(error_string, ());
1390 }
1391 let (_dummy_lease, peer) = zx::EventPair::create();
1394 debug!(
1395 "bogus lease: {:?} fidl error [{}:{}]",
1396 &peer.koid().unwrap(),
1397 file!(),
1398 line!()
1399 );
1400 notify_all(
1401 &mut timers,
1402 &peer,
1403 now,
1404 Some(TimerOpsError::Fidl(error)),
1405 &slack_histogram_prop,
1406 )
1407 .expect("notification succeeds");
1408 hrtimer_status = match timers.peek_deadline_as_boot() {
1409 None => None, Some(deadline) => Some(
1411 schedule_hrtimer(
1412 scope.clone(),
1413 now,
1414 &timer_proxy,
1415 deadline,
1416 snd.clone(),
1417 &timer_config,
1418 &schedule_delay_prop,
1419 &hrtimer_node,
1420 )
1421 .await,
1422 ),
1423 }
1424 }
1425 Cmd::AlarmDriverError {
1426 expired_deadline,
1427 error,
1428 timer_config_id,
1429 resolution_nanos,
1430 ticks,
1431 } => {
1432 let _i = ScopedInc::new(&alarm_driver_count);
1433
1434 trace::duration!("alarms", "Cmd::AlarmDriverError");
1435 let (_dummy_lease, peer) = zx::EventPair::create();
1436 debug!(
1437 "bogus lease: {:?} driver error. [{}:{}]",
1438 &peer.koid().unwrap(),
1439 file!(),
1440 line!()
1441 );
1442 notify_all(
1443 &mut timers,
1444 &peer,
1445 now,
1446 Some(TimerOpsError::Driver(error)),
1447 &slack_histogram_prop,
1448 )
1449 .expect("notification succeeds");
1450 match error {
1451 fidl_fuchsia_hardware_hrtimer::DriverError::Canceled => {
1452 debug!(
1454 "wake_timer_loop: CANCELED timer at deadline: {}",
1455 format_timer(expired_deadline.into())
1456 );
1457 }
1458 _ => {
1459 error!(
1460 "wake_timer_loop: DRIVER SAYS: {:?}, deadline: {}, now: {}\n\ttimer_id={}\n\tresolution={}\n\tticks={}",
1461 error,
1462 format_timer(expired_deadline.into()),
1463 format_timer(now.into()),
1464 timer_config_id,
1465 resolution_nanos,
1466 ticks,
1467 );
1468 hrtimer_status = match timers.peek_deadline_as_boot() {
1472 None => None,
1473 Some(deadline) => Some(
1474 schedule_hrtimer(
1475 scope.clone(),
1476 now,
1477 &timer_proxy,
1478 deadline,
1479 snd.clone(),
1480 &timer_config,
1481 &schedule_delay_prop,
1482 &hrtimer_node,
1483 )
1484 .await,
1485 ),
1486 }
1487 }
1488 }
1489 }
1490 Cmd::UtcUpdated { transform } => {
1491 let _i = ScopedInc::new(&utc_update_count);
1492
1493 trace::duration!("alarms", "Cmd::UtcUpdated");
1494 debug!("wake_timer_loop: applying new clock transform: {transform:?}");
1495
1496 *utc_transform.borrow_mut() = transform;
1499
1500 if hrtimer_status.is_some() {
1503 log_long_op!(stop_hrtimer(&timer_proxy, &timer_config));
1504 hrtimer_status = match timers.peek_deadline_as_boot() {
1506 None => None,
1507 Some(deadline) => Some(
1508 schedule_hrtimer(
1509 scope.clone(),
1510 now,
1511 &timer_proxy,
1512 deadline,
1513 snd.clone(),
1514 &timer_config,
1515 &schedule_delay_prop,
1516 &hrtimer_node,
1517 )
1518 .await,
1519 ),
1520 }
1521 }
1522 }
1523 }
1524
1525 {
1526 let _i = ScopedInc::new(&status_count);
1527
1528 trace::duration!("timekeeper", "inspect");
1533 let now_formatted = format_timer(now.into());
1534 debug!("wake_timer_loop: now: {}", &now_formatted);
1535 now_formatted_prop.set(&now_formatted);
1536
1537 let pending_timers_count: u64 =
1538 timers.timer_count().try_into().expect("always convertible");
1539 debug!("wake_timer_loop: currently pending timer count: {}", pending_timers_count);
1540 pending_timers_count_prop.set(pending_timers_count);
1541
1542 let pending_timers = format!("{}", timers);
1543 debug!("wake_timer_loop: currently pending timers: \n\t{}", &timers);
1544 pending_timers_prop.set(&pending_timers);
1545
1546 let current_deadline: String = hrtimer_status
1547 .as_ref()
1548 .map(|s| format!("{}", format_timer(s.deadline.into())))
1549 .unwrap_or_else(|| "(none)".into());
1550 debug!("wake_timer_loop: current hardware timer deadline: {:?}", current_deadline);
1551 current_hw_deadline_prop.set(¤t_deadline);
1552
1553 let remaining_duration_until_alarm = hrtimer_status
1554 .as_ref()
1555 .map(|s| format!("{}", format_duration((s.deadline - now).into())))
1556 .unwrap_or_else(|| "(none)".into());
1557 debug!(
1558 "wake_timer_loop: remaining duration until alarm: {}",
1559 &remaining_duration_until_alarm
1560 );
1561 remaining_until_alarm_prop.set(&remaining_duration_until_alarm);
1562 debug!("---");
1563 }
1564 }
1565
1566 log::info!("wake_timer_loop: exiting. This is only correct in test code.");
1569}
1570
/// Programs a single hardware timer on the hrtimer driver to fire at `deadline`.
///
/// Issues `hrtimer.start_and_wait`, then spawns a task on `scope` that awaits
/// the driver's response (alarm fired, FIDL error, or driver error) and
/// reports the outcome back to the wake timer loop through `command_send`.
/// This function does not return until the driver has signaled (via an event)
/// that the timer is actually scheduled, so callers can rely on the hardware
/// timer being armed once this future resolves.
///
/// Returns a [TimerState] holding the spawned response task and the deadline.
async fn schedule_hrtimer(
    scope: fasync::ScopeHandle,
    now: fasync::BootInstant,
    // NOTE(review): `&Box<dyn TimerOps>` could likely be loosened to
    // `&dyn TimerOps`; confirm with callers before changing.
    hrtimer: &Box<dyn TimerOps>,
    deadline: fasync::BootInstant,
    mut command_send: mpsc::Sender<Cmd>,
    timer_config: &TimerConfig,
    _schedule_delay_histogram: &finspect::IntExponentialHistogramProperty,
    debug_node: &finspect::Node,
) -> TimerState {
    // Clamp to zero so an already-elapsed deadline is scheduled immediately.
    let timeout = std::cmp::max(zx::BootDuration::ZERO, deadline - now);
    trace::duration!("alarms", "schedule_hrtimer", "timeout" => timeout.into_nanos());
    // Signaled by the driver once the timer is programmed; the error paths in
    // the spawned task below also signal it so the wait further down cannot
    // hang on a failed setup.
    let hrtimer_scheduled = zx::Event::create();

    // Inspect debug counters for diagnosing where scheduling spends time.
    let schedule_count = debug_node.create_int("schedule", 0);
    let hrtimer_wait_count = debug_node.create_int("hrtimer_wait", 0);
    let wait_signaled_count = debug_node.create_int("wait_signaled", 0);

    let _sc = ScopedInc::new(&schedule_count);

    debug!(
        "schedule_hrtimer:\n\tnow: {}\n\tdeadline: {}\n\ttimeout: {}",
        format_timer(now.into()),
        format_timer(deadline.into()),
        format_duration(timeout),
    );

    // Pick the resolution/ticks pair best matching the requested timeout.
    let slack = timer_config.pick_setting(timeout);
    let resolution_nanos = slack.resolution.into_nanos();
    // Never program zero ticks; the driver needs at least one tick to fire.
    let useful_ticks = std::cmp::max(MIN_USEFUL_TICKS, slack.ticks());

    trace::instant!("alarms", "hrtimer:programmed",
        trace::Scope::Process,
        "resolution_ns" => resolution_nanos,
        "ticks" => useful_ticks
    );
    let timer_config_id = timer_config.id;
    let start_and_wait_fut = {
        let _sc = ScopedInc::new(&hrtimer_wait_count);
        hrtimer.start_and_wait(
            timer_config.id,
            &ffhh::Resolution::Duration(resolution_nanos),
            useful_ticks,
            clone_handle(&hrtimer_scheduled),
        )
    };

    let hrtimer_scheduled_if_error = clone_handle(&hrtimer_scheduled);
    let hrtimer_task = scope.spawn_local(async move {
        debug!("hrtimer_task: waiting for hrtimer driver response");
        trace::instant!("alarms", "hrtimer:started", trace::Scope::Process);
        let response = start_and_wait_fut.await;
        trace::instant!("alarms", "hrtimer:response", trace::Scope::Process);
        match response {
            Err(TimerOpsError::Fidl(e)) => {
                defer! {
                    // Unblock the scheduling wait even though setup failed.
                    signal(&hrtimer_scheduled_if_error);
                }
                trace::instant!("alarms", "hrtimer:response:fidl_error", trace::Scope::Process);
                command_send
                    .start_send(Cmd::AlarmFidlError { expired_deadline: now, error: e })
                    .unwrap();
            }
            Err(TimerOpsError::Driver(e)) => {
                defer! {
                    // Unblock the scheduling wait even though setup failed.
                    signal(&hrtimer_scheduled_if_error);
                }
                let driver_error_str = format!("{:?}", e);
                trace::instant!("alarms", "hrtimer:response:driver_error", trace::Scope::Process, "error" => &driver_error_str[..]);
                debug!("schedule_hrtimer: hrtimer driver error: {:?}", e);
                command_send
                    .start_send(Cmd::AlarmDriverError {
                        expired_deadline: now,
                        error: e,
                        timer_config_id,
                        resolution_nanos,
                        ticks: useful_ticks,
                    })
                    .unwrap();
            }
            Ok(keep_alive) => {
                trace::instant!("alarms", "hrtimer:response:alarm", trace::Scope::Process);
                debug!("hrtimer: got alarm response: {:?}", keep_alive);
                // Alarm fired: report the originally requested deadline and
                // forward the driver-provided wake lease.
                command_send
                    .start_send(Cmd::Alarm { expired_deadline: deadline, keep_alive })
                    .unwrap();
            }
        }
        debug!("hrtimer_task: exiting task.");
        trace::instant!("alarms", "hrtimer:task_exit", trace::Scope::Process);
    }).into();
    debug!("schedule_hrtimer: waiting for event to be signaled");

    {
        let _i = ScopedInc::new(&wait_signaled_count);
        // Block until the driver confirms the timer is actually programmed.
        log_long_op!(wait_signaled(&hrtimer_scheduled));
    }

    // Flag unusually slow scheduling round-trips for diagnosis.
    let now_after_signaled = fasync::BootInstant::now();
    let duration_until_scheduled: zx::BootDuration = (now_after_signaled - now).into();
    if duration_until_scheduled > zx::BootDuration::from_nanos(LONG_DELAY_NANOS) {
        trace::duration!("alarms", "schedule_hrtimer:unusual_duration",
            "duration" => duration_until_scheduled.into_nanos());
        warn!(
            "unusual duration until hrtimer scheduled: {}",
            format_duration(duration_until_scheduled)
        );
    }
    debug!("schedule_hrtimer: hrtimer wake alarm has been scheduled.");
    TimerState { task: hrtimer_task, deadline }
}
1709
1710fn notify_all(
1721 timers: &mut timers::Heap,
1722 lease_prototype: &zx::EventPair,
1723 reference_instant: fasync::BootInstant,
1724 timer_ops_error: Option<TimerOpsError>,
1725 _unusual_slack_histogram: &finspect::IntExponentialHistogramProperty,
1726) -> Result<usize> {
1727 trace::duration!("alarms", "notify_all");
1728 let now = fasync::BootInstant::now();
1729 let mut expired = 0;
1730 while let Some(timer_node) = timers.maybe_expire_earliest(reference_instant) {
1731 expired += 1;
1732 let deadline = timer_node.get_boot_deadline();
1734 let alarm = timer_node.id().alarm();
1735 let alarm_id = alarm.to_string();
1736 trace::duration!("alarms", "notify_all:notified", "alarm_id" => &*alarm_id);
1737 fuchsia_trace::flow_step!("alarms", "hrtimer_lifecycle", timers::get_trace_id(&alarm_id));
1738 let conn_id = timer_node.id().conn.clone();
1739 let slack: zx::BootDuration = deadline - now;
1740 if slack < zx::BootDuration::from_nanos(-LONG_DELAY_NANOS) {
1741 trace::duration!("alarms", "schedule_hrtimer:unusual_slack", "slack" => slack.into_nanos());
1742 warn!(
1744 "alarm id: {} had an unusually large slack: {}",
1745 alarm_id,
1746 format_duration(slack)
1747 );
1748 }
1749 if slack < zx::BootDuration::ZERO {
1750 }
1753 if let Some(ref err) = timer_ops_error {
1754 if !err.is_canceled() {
1757 timer_node.get_responder().send(alarm, Err(err.clone().into()));
1758 continue;
1759 }
1760 }
1761 debug!(
1762 concat!(
1763 "wake_alarm_loop: ALARM alarm_id: \"{}\"\n\tdeadline: {},\n\tconn_id: {:?},\n\t",
1764 "reference_instant: {},\n\tnow: {},\n\tslack: {}",
1765 ),
1766 alarm_id,
1767 format_timer(deadline.into()),
1768 conn_id,
1769 format_timer(reference_instant.into()),
1770 format_timer(now.into()),
1771 format_duration(slack),
1772 );
1773 let lease = clone_handle(lease_prototype);
1774 trace::instant!("alarms", "notify", trace::Scope::Process, "alarm_id" => &alarm_id[..], "conn_id" => conn_id);
1775 if let Some(Err(e)) = timer_node.get_responder().send(alarm, Ok(lease)) {
1776 error!("could not signal responder: {:?}", e);
1777 }
1778 trace::instant!("alarms", "notified", trace::Scope::Process);
1779 }
1780 trace::instant!("alarms", "notify", trace::Scope::Process, "expired_count" => expired);
1781 debug!("notify_all: expired count: {}", expired);
1782 Ok(expired)
1783 }
1785
1786const HRTIMER_DIRECTORY: &str = "/dev/class/hrtimer";
1789
1790pub async fn connect_to_hrtimer_async() -> Result<ffhh::DeviceProxy> {
1799 debug!("connect_to_hrtimer: trying directory: {}", HRTIMER_DIRECTORY);
1800 let dir =
1801 fuchsia_fs::directory::open_in_namespace(HRTIMER_DIRECTORY, fidl_fuchsia_io::PERM_READABLE)
1802 .with_context(|| format!("Opening {}", HRTIMER_DIRECTORY))?;
1803 let path = device_watcher::watch_for_files(&dir)
1804 .await
1805 .with_context(|| format!("Watching for files in {}", HRTIMER_DIRECTORY))?
1806 .try_next()
1807 .await
1808 .with_context(|| format!("Getting a file from {}", HRTIMER_DIRECTORY))?;
1809 let path = path.ok_or_else(|| anyhow!("Could not find {}", HRTIMER_DIRECTORY))?;
1810 let path = path
1811 .to_str()
1812 .ok_or_else(|| anyhow!("Could not find a valid str for {}", HRTIMER_DIRECTORY))?;
1813 connect_to_named_protocol_at_dir_root::<ffhh::DeviceMarker>(&dir, path)
1814 .context("Failed to connect built-in service")
1815}
1816
1817#[cfg(test)]
1818mod tests {
1819 use super::*;
1820 use assert_matches::assert_matches;
1821 use diagnostics_assertions::{AnyProperty, assert_data_tree};
1822 use fuchsia_async::TestExecutor;
1823 use futures::select;
1824 use test_case::test_case;
1825 use test_util::{assert_gt, assert_lt};
1826
1827 fn fake_wake_lease() -> fidl_fuchsia_power_system::LeaseToken {
1828 let (_lease, peer) = zx::EventPair::create();
1829 peer
1830 }
1831
1832 #[test]
1833 fn timer_duration_no_overflow() {
1834 let duration1 = TimerDuration {
1835 resolution: zx::BootDuration::from_seconds(100_000_000),
1836 ticks: u64::MAX,
1837 };
1838 let duration2 = TimerDuration {
1839 resolution: zx::BootDuration::from_seconds(110_000_000),
1840 ticks: u64::MAX,
1841 };
1842 assert_eq!(duration1, duration1);
1843 assert_eq!(duration2, duration2);
1844
1845 assert_lt!(duration1, duration2);
1846 assert_gt!(duration2, duration1);
1847 }
1848
    #[test_case(
        TimerDuration::new(zx::BootDuration::from_nanos(1), 1),
        TimerDuration::new(zx::BootDuration::from_nanos(1), 1)
    )]
    #[test_case(
        TimerDuration::new(zx::BootDuration::from_nanos(1), 10),
        TimerDuration::new(zx::BootDuration::from_nanos(10), 1)
    )]
    #[test_case(
        TimerDuration::new(zx::BootDuration::from_nanos(10), 1),
        TimerDuration::new(zx::BootDuration::from_nanos(1), 10)
    )]
    #[test_case(
        TimerDuration::new(zx::BootDuration::from_micros(1), 1),
        TimerDuration::new(zx::BootDuration::from_nanos(1), 1000)
    )]
    // TimerDurations representing the same total duration compare equal,
    // regardless of how it is split between resolution and ticks.
    fn test_slack_eq(one: TimerDuration, other: TimerDuration) {
        assert_eq!(one, other);
    }
1868
    #[test_case(
        TimerDuration::new(zx::BootDuration::from_nanos(1), 1),
        TimerDuration::new(zx::BootDuration::from_nanos(1), 2)
    )]
    #[test_case(
        TimerDuration::new(zx::BootDuration::from_nanos(1), 1),
        TimerDuration::new(zx::BootDuration::from_nanos(10), 1)
    )]
    // Ordering follows total duration: 1ns < 2ns and 1ns < 10ns.
    fn test_slack_lt(one: TimerDuration, other: TimerDuration) {
        assert_lt!(one, other);
    }
1880
    #[test_case(
        TimerDuration::new(zx::BootDuration::from_nanos(1), 2),
        TimerDuration::new(zx::BootDuration::from_nanos(1), 1)
    )]
    #[test_case(
        TimerDuration::new(zx::BootDuration::from_nanos(10), 1),
        TimerDuration::new(zx::BootDuration::from_nanos(1), 1)
    )]
    // Mirror of test_slack_lt: the greater total duration compares greater.
    fn test_slack_gt(one: TimerDuration, other: TimerDuration) {
        assert_gt!(one, other);
    }
1892
    #[test_case(
        vec![zx::BootDuration::from_nanos(1)],
        100,
        zx::BootDuration::from_nanos(0),
        TimerDuration::new(zx::BootDuration::from_nanos(1), 1) ; "0ns becomes 1ns"
    )]
    #[test_case(
        vec![zx::BootDuration::from_nanos(1)],
        100,
        zx::BootDuration::from_nanos(50),
        TimerDuration::new(zx::BootDuration::from_nanos(1), 50) ; "Exact at 50x1ns"
    )]
    #[test_case(
        vec![zx::BootDuration::from_nanos(2)],
        100,
        zx::BootDuration::from_nanos(50),
        TimerDuration::new(zx::BootDuration::from_nanos(2), 25) ; "Exact at 25x2ns"
    )]
    #[test_case(
        vec![zx::BootDuration::from_nanos(3)],
        100,
        zx::BootDuration::from_nanos(50),
        TimerDuration::new(zx::BootDuration::from_nanos(3), 17) ; "Inexact at 51ns"
    )]
    #[test_case(
        vec![
            zx::BootDuration::from_nanos(3),
            zx::BootDuration::from_nanos(4)
        ],
        100,
        zx::BootDuration::from_nanos(50),
        TimerDuration::new(zx::BootDuration::from_nanos(3), 17) ; "3ns is a better resolution"
    )]
    #[test_case(
        vec![
            zx::BootDuration::from_nanos(1000),
        ],
        100,
        zx::BootDuration::from_nanos(50),
        TimerDuration::new(zx::BootDuration::from_nanos(1000), 1) ;
        "950ns negative slack is the best we can do"
    )]
    #[test_case(
        vec![
            zx::BootDuration::from_nanos(1),
        ],
        10,
        zx::BootDuration::from_nanos(50),
        TimerDuration::new(zx::BootDuration::from_nanos(1), 10) ;
        "10ns positive slack is the best we can do"
    )]
    #[test_case(
        vec![
            zx::BootDuration::from_millis(1),
            zx::BootDuration::from_micros(100),
            zx::BootDuration::from_micros(10),
            zx::BootDuration::from_micros(1),
        ],
        20, zx::BootDuration::from_micros(150),
        TimerDuration::new(zx::BootDuration::from_micros(10), 15) ;
        "Realistic case with resolutions from driver, should be 15us"
    )]
    #[test_case(
        vec![
            zx::BootDuration::from_millis(1),
            zx::BootDuration::from_micros(100),
            zx::BootDuration::from_micros(10),
            zx::BootDuration::from_micros(1),
        ],
        2000, zx::BootDuration::from_micros(6000),
        TimerDuration::new(zx::BootDuration::from_millis(1), 6) ;
        "Coarser exact unit wins"
    )]
    #[test_case(
        vec![
            zx::BootDuration::from_millis(1),
            zx::BootDuration::from_millis(10),
            zx::BootDuration::from_millis(100),
        ],
        1000,
        zx::BootDuration::from_micros(-10),
        TimerDuration::new(zx::BootDuration::from_millis(1), 1) ;
        "Negative duration gets the smallest timer duration"
    )]
    #[test_case(
        vec![
            zx::BootDuration::from_millis(1),
            zx::BootDuration::from_millis(10),
            zx::BootDuration::from_millis(100),
        ],
        1000,
        zx::BootDuration::ZERO,
        TimerDuration::new(zx::BootDuration::from_millis(1), 1) ;
        "Zero duration gets the smallest timer duration"
    )]
    // Exercises TimerConfig::pick_setting: for each set of supported driver
    // resolutions and tick budget, the picked resolution/ticks pair must
    // match the expected TimerDuration for the requested timeout.
    fn test_pick_setting(
        resolutions: Vec<zx::BootDuration>,
        max_ticks: u64,
        duration: zx::BootDuration,
        expected: TimerDuration,
    ) {
        let config = TimerConfig::new_from_data(MAIN_TIMER_ID as u64, &resolutions[..], max_ticks);
        let actual = config.pick_setting(duration);

        assert_slack_eq(expected, actual);
    }
2004
2005 fn assert_slack_eq(expected: TimerDuration, actual: TimerDuration) {
2007 let slack = expected.duration() - actual.duration();
2008 assert_eq!(
2009 actual.resolution(),
2010 expected.resolution(),
2011 "\n\texpected: {} ({})\n\tactual : {} ({})\n\tslack: expected-actual={}",
2012 expected,
2013 format_duration(expected.duration()),
2014 actual,
2015 format_duration(actual.duration()),
2016 format_duration(slack)
2017 );
2018 assert_eq!(
2019 actual.ticks(),
2020 expected.ticks(),
2021 "\n\texpected: {} ({})\n\tactual : {} ({})\n\tslack: expected-actual={}",
2022 expected,
2023 format_duration(expected.duration()),
2024 actual,
2025 format_duration(actual.duration()),
2026 format_duration(slack)
2027 );
2028 }
2029
    /// Commands accepted by the fake hrtimer driver used in tests.
    #[derive(Debug)]
    enum FakeCmd {
        /// Configures the timer properties the fake driver advertises.
        SetProperties {
            // Resolutions reported in `GetProperties`.
            resolutions: Vec<zx::BootDuration>,
            // Maximum tick count reported in `GetProperties`.
            max_ticks: i64,
            // Lease handle handed back to clients when a fake alarm fires.
            keep_alive: zx::EventPair,
            // Signaled once the properties have been applied.
            done: zx::Event,
        },
    }
2039
2040 use std::cell::RefCell;
2041 use std::rc::Rc;
2042
    /// Creates a fake hrtimer driver connection for tests.
    ///
    /// Serves `fuchsia.hardware.hrtimer.Device` on a task spawned on `scope`,
    /// simulating timer expiry with `fasync::Timer`. The fake is configured
    /// through `rcv` (see [FakeCmd]); `SetProperties` must be sent before
    /// `GetProperties` can return useful data.
    fn fake_hrtimer_connection(
        scope: fasync::ScopeHandle,
        rcv: mpsc::Receiver<FakeCmd>,
    ) -> ffhh::DeviceProxy {
        debug!("fake_hrtimer_connection: entry.");
        let (hrtimer, mut stream) =
            fidl::endpoints::create_proxy_and_stream::<ffhh::DeviceMarker>();
        scope.clone().spawn_local(async move {
            let mut rcv = rcv.fuse();
            let timer_properties = Rc::new(RefCell::new(None));
            let wake_lease = Rc::new(RefCell::new(None));

            // Tracks whether a fake hardware timer is currently armed, to
            // assert the driver invariant that only one may run at a time.
            let timer_running = Rc::new(RefCell::new(false));

            loop {
                let timer_properties = timer_properties.clone();
                let wake_lease = wake_lease.clone();
                select! {
                    // Test-fixture configuration commands.
                    cmd = rcv.next() => {
                        debug!("fake_hrtimer_connection: cmd: {:?}", cmd);
                        match cmd {
                            Some(FakeCmd::SetProperties{ resolutions, max_ticks, keep_alive, done}) => {
                                let mut timer_props = vec![];
                                // Advertise ten identical timers with ids 0..9.
                                for v in 0..10 {
                                    timer_props.push(ffhh::TimerProperties {
                                        supported_resolutions: Some(
                                            resolutions.iter()
                                                .map(|d| ffhh::Resolution::Duration(d.into_nanos())).collect()),
                                        max_ticks: Some(max_ticks.try_into().unwrap()),
                                        supports_wait: Some(true),
                                        id: Some(v),
                                        ..Default::default()
                                    },
                                    );
                                }
                                *timer_properties.borrow_mut() = Some(timer_props);
                                *wake_lease.borrow_mut() = Some(keep_alive);
                                debug!("set timer properties to: {:?}", timer_properties);
                                signal(&done);
                            }
                            e => {
                                panic!("unrecognized command: {:?}", e);
                            }
                        }
                    },
                    // FIDL requests from the code under test.
                    event = stream.next() => {
                        debug!("fake_hrtimer_connection: event: {:?}", event);
                        if let Some(Ok(event)) = event {
                            match event {
                                ffhh::DeviceRequest::Start { responder, .. } => {
                                    assert!(!*timer_running.borrow(), "invariant broken: timer may not be running here");
                                    *timer_running.borrow_mut() = true;
                                    responder.send(Ok(())).expect("");
                                }
                                ffhh::DeviceRequest::Stop { responder, .. } => {
                                    *timer_running.borrow_mut() = false;
                                    responder.send(Ok(())).expect("");
                                }
                                ffhh::DeviceRequest::GetTicksLeft { responder, .. } => {
                                    responder.send(Ok(1)).expect("");
                                }
                                ffhh::DeviceRequest::SetEvent { responder, .. } => {
                                    responder.send(Ok(())).expect("");
                                }
                                ffhh::DeviceRequest::StartAndWait { id, resolution, ticks, setup_event, responder, .. } => {
                                    assert!(!*timer_running.borrow(), "invariant broken: timer may not be running here");
                                    *timer_running.borrow_mut() = true;
                                    debug!("fake_hrtimer_connection: starting timer: \"{}\", resolution: {:?}, ticks: {}", id, resolution, ticks);
                                    let ticks: i64 = ticks.try_into().unwrap();
                                    let sleep_duration = zx::BootDuration::from_nanos(ticks * match resolution {
                                        ffhh::Resolution::Duration(e) => e,
                                        _ => {
                                            error!("resolution has an unexpected value");
                                            1
                                        }
                                    });
                                    let timer_running_clone = timer_running.clone();
                                    scope.spawn_local(async move {
                                        // Confirm scheduling to the caller, then
                                        // simulate expiry by sleeping.
                                        signal(&setup_event);

                                        fasync::Timer::new(sleep_duration).await;
                                        *timer_running_clone.borrow_mut() = false;
                                        responder.send(Ok(clone_handle(wake_lease.borrow().as_ref().unwrap()))).unwrap();
                                        debug!("StartAndWait: hrtimer expired");
                                    });
                                }
                                ffhh::DeviceRequest::StartAndWait2 { responder, .. } => {
                                    assert!(!*timer_running.borrow(), "invariant broken: timer may not be running here");
                                    *timer_running.borrow_mut() = true;
                                    // StartAndWait2 is deliberately unsupported by this fake.
                                    responder.send(Err(ffhh::DriverError::InternalError)).expect("");
                                }
                                ffhh::DeviceRequest::GetProperties { responder, .. } => {
                                    if (*timer_properties).borrow().is_none() {
                                        error!("timer_properties is empty, this is not what you want!");
                                    }
                                    responder
                                        .send(ffhh::Properties {
                                            timers_properties: (*timer_properties).borrow().clone(),
                                            ..Default::default()
                                        })
                                        .expect("");
                                }
                                ffhh::DeviceRequest::ReadTimer { responder, .. } => {
                                    responder.send(Err(ffhh::DriverError::NotSupported)).expect("");
                                }
                                ffhh::DeviceRequest::ReadClock { responder, .. } => {
                                    responder.send(Err(ffhh::DriverError::NotSupported)).expect("");
                                }
                                ffhh::DeviceRequest::_UnknownMethod { .. } => todo!(),
                            }
                        }
                    },
                }
            }
        });
        hrtimer
    }
2174
    /// Duplicates a UTC clock handle with identical rights.
    ///
    /// Panics only if handle duplication fails (e.g. `orig` is invalid).
    fn clone_utc_clock(orig: &fxr::UtcClock) -> fxr::UtcClock {
        orig.duplicate_handle(zx::Rights::SAME_RIGHTS).unwrap()
    }
2178
    /// Shared fixture for the wake-alarm tests below.
    struct TestContext {
        // Client connection to the wake-alarms API under test.
        wake_proxy: fta::WakeAlarmsProxy,
        // Keeps the serving and fake-driver tasks alive for the test.
        _scope: fasync::Scope,
        // Channel for configuring the fake hrtimer driver.
        _cmd_tx: mpsc::Sender<FakeCmd>,
        // UTC clock whose clone was handed to the alarm loop.
        utc_clock: fxr::UtcClock,
        // Backstop instant `utc_clock` was created with.
        utc_backstop: fxr::UtcInstant,
    }
2187
    impl TestContext {
        /// Builds the test fixture: fake hrtimer driver, alarm loop, and a
        /// connected `WakeAlarms` proxy, all running on a private scope.
        async fn new() -> Self {
            // Pin fake time to a known starting point.
            TestExecutor::advance_to(fasync::MonotonicInstant::from_nanos(0)).await;

            let scope = fasync::Scope::new();
            let utc_backstop = fxr::UtcInstant::from_nanos(1000);
            let utc_clock =
                fxr::UtcClock::create(zx::ClockOpts::empty(), Some(utc_backstop)).unwrap();
            let utc_clone = clone_utc_clock(&utc_clock);
            let (mut cmd_tx, wake_proxy) = {
                let (tx, rx) = mpsc::channel::<FakeCmd>(0);
                let hrtimer_proxy = fake_hrtimer_connection(scope.to_handle(), rx);

                let inspector = finspect::component::inspector();
                let alarms = Rc::new(Loop::new(
                    scope.to_handle(),
                    hrtimer_proxy,
                    inspector.root().create_child("test"),
                    utc_clone,
                ));

                // Serve the WakeAlarms protocol backed by the loop above.
                let (proxy, stream) =
                    fidl::endpoints::create_proxy_and_stream::<fta::WakeAlarmsMarker>();
                scope.spawn_local(async move {
                    serve(alarms, stream).await;
                });
                (tx, proxy)
            };

            // Configure the fake driver (1ns resolution, 100 max ticks) and
            // wait for it to acknowledge via `done`.
            let (_wake_lease, peer) = zx::EventPair::create();
            let done = zx::Event::create();
            cmd_tx
                .start_send(FakeCmd::SetProperties {
                    resolutions: vec![zx::Duration::from_nanos(1)],
                    max_ticks: 100,
                    keep_alive: peer,
                    done: done.duplicate_handle(zx::Rights::SAME_RIGHTS).unwrap(),
                })
                .unwrap();

            assert_matches!(fasync::OnSignals::new(done, zx::Signals::EVENT_SIGNALED).await, Ok(_));

            Self { wake_proxy, _scope: scope, _cmd_tx: cmd_tx, utc_clock, utc_backstop }
        }
    }
2234
    impl Drop for TestContext {
        fn drop(&mut self) {
            // Every test must drain all scheduled timers; a timer left in the
            // executor at teardown indicates a test bug or a leaked alarm.
            assert_matches!(TestExecutor::next_timer(), None, "Unexpected lingering timers");
        }
    }
2240
    #[fuchsia::test(allow_stalls = false)]
    async fn test_basic_timed_wait() {
        // A single alarm must: (1) remain pending before its deadline,
        // (2) signal the setup event once scheduled, (3) fire at the deadline.
        let ctx = TestContext::new().await;

        let deadline = zx::BootInstant::from_nanos(100);
        let setup_done = zx::Event::create();
        let mut set_task = ctx.wake_proxy.set_and_wait(
            deadline.into(),
            fta::SetMode::NotifySetupDone(
                setup_done.duplicate_handle(zx::Rights::SAME_RIGHTS).unwrap(),
            ),
            "Hello".into(),
        );

        // Not expired yet.
        assert_matches!(TestExecutor::poll_until_stalled(&mut set_task).await, Poll::Pending);

        let mut setup_done_task = fasync::OnSignals::new(setup_done, zx::Signals::EVENT_SIGNALED);
        assert_matches!(
            TestExecutor::poll_until_stalled(&mut setup_done_task).await,
            Poll::Ready(Ok(_)),
            "Setup event not triggered after scheduling an alarm"
        );

        // Advance fake time to the deadline; the alarm must now complete.
        TestExecutor::advance_to(fasync::MonotonicInstant::from_nanos(100)).await;
        assert_matches!(TestExecutor::poll_until_stalled(set_task).await, Poll::Ready(Ok(Ok(_))));
    }
2267
    #[fuchsia::test(allow_stalls = false)]
    async fn test_basic_timed_wait_notify() {
        // Same as test_basic_timed_wait, but using the notifier-based `set`
        // API instead of the hanging-get `set_and_wait`.
        const ALARM_ID: &str = "Hello";
        let ctx = TestContext::new().await;

        let (notifier_client, mut notifier_stream) =
            fidl::endpoints::create_request_stream::<fta::NotifierMarker>();
        let setup_done = zx::Event::create();
        assert_matches!(
            ctx.wake_proxy
                .set(
                    notifier_client,
                    fidl::BootInstant::from_nanos(2),
                    fta::SetMode::NotifySetupDone(
                        setup_done.duplicate_handle(zx::Rights::SAME_RIGHTS).unwrap()
                    ),
                    ALARM_ID,
                )
                .await,
            Ok(Ok(()))
        );

        let mut done_task = fasync::OnSignals::new(setup_done, zx::Signals::EVENT_SIGNALED);
        assert_matches!(
            TestExecutor::poll_until_stalled(&mut done_task).await,
            Poll::Ready(Ok(_)),
            "Setup event not triggered after scheduling an alarm"
        );

        let mut next_task = notifier_stream.next();
        assert_matches!(TestExecutor::poll_until_stalled(&mut next_task).await, Poll::Pending);

        // Still pending one nanosecond before the deadline.
        TestExecutor::advance_to(fasync::MonotonicInstant::from_nanos(1)).await;
        assert_matches!(TestExecutor::poll_until_stalled(&mut next_task).await, Poll::Pending);

        // At the deadline, the notifier receives exactly this alarm id.
        TestExecutor::advance_to(fasync::MonotonicInstant::from_nanos(2)).await;
        assert_matches!(
            TestExecutor::poll_until_stalled(next_task).await,
            Poll::Ready(Some(Ok(fta::NotifierRequest::Notify { alarm_id, .. }))) if alarm_id == ALARM_ID
        );
    }
2309
    #[fuchsia::test(allow_stalls = false)]
    async fn test_two_alarms_same() {
        // Two alarms with the same deadline must both fire when time reaches
        // that deadline.
        const DEADLINE_NANOS: i64 = 100;

        let ctx = TestContext::new().await;

        let mut set_task_1 = ctx.wake_proxy.set_and_wait(
            fidl::BootInstant::from_nanos(DEADLINE_NANOS),
            fta::SetMode::KeepAlive(fake_wake_lease()),
            "Hello1".into(),
        );
        let mut set_task_2 = ctx.wake_proxy.set_and_wait(
            fidl::BootInstant::from_nanos(DEADLINE_NANOS),
            fta::SetMode::KeepAlive(fake_wake_lease()),
            "Hello2".into(),
        );

        // Neither fires before the deadline.
        assert_matches!(TestExecutor::poll_until_stalled(&mut set_task_1).await, Poll::Pending);
        assert_matches!(TestExecutor::poll_until_stalled(&mut set_task_2).await, Poll::Pending);

        TestExecutor::advance_to(fasync::MonotonicInstant::from_nanos(DEADLINE_NANOS)).await;

        // Both complete at the shared deadline.
        assert_matches!(
            TestExecutor::poll_until_stalled(&mut set_task_1).await,
            Poll::Ready(Ok(Ok(_)))
        );
        assert_matches!(
            TestExecutor::poll_until_stalled(&mut set_task_2).await,
            Poll::Ready(Ok(Ok(_)))
        );
    }
2341
    #[fuchsia::test(allow_stalls = false)]
    async fn test_two_alarms_same_notify() {
        // Notifier-based variant of test_two_alarms_same: both notifiers get
        // their Notify call at the shared deadline, then their channels close.
        const DEADLINE_NANOS: i64 = 100;
        const ALARM_ID_1: &str = "Hello1";
        const ALARM_ID_2: &str = "Hello2";

        let ctx = TestContext::new().await;

        // Schedules one alarm and returns its notifier stream.
        let schedule = async |deadline_nanos: i64, alarm_id: &str| {
            let (notifier_client, notifier_stream) =
                fidl::endpoints::create_request_stream::<fta::NotifierMarker>();
            assert_matches!(
                ctx.wake_proxy
                    .set(
                        notifier_client,
                        fidl::BootInstant::from_nanos(deadline_nanos),
                        fta::SetMode::KeepAlive(fake_wake_lease()),
                        alarm_id,
                    )
                    .await,
                Ok(Ok(()))
            );
            notifier_stream
        };

        let mut notifier_1 = schedule(DEADLINE_NANOS, ALARM_ID_1).await;
        let mut notifier_2 = schedule(DEADLINE_NANOS, ALARM_ID_2).await;

        let mut next_task_1 = notifier_1.next();
        let mut next_task_2 = notifier_2.next();

        // No notifications before the deadline.
        assert_matches!(TestExecutor::poll_until_stalled(&mut next_task_1).await, Poll::Pending);
        assert_matches!(TestExecutor::poll_until_stalled(&mut next_task_2).await, Poll::Pending);

        TestExecutor::advance_to(fasync::MonotonicInstant::from_nanos(DEADLINE_NANOS)).await;

        assert_matches!(
            TestExecutor::poll_until_stalled(&mut next_task_1).await,
            Poll::Ready(Some(Ok(fta::NotifierRequest::Notify { alarm_id, .. }))) if alarm_id == ALARM_ID_1
        );
        assert_matches!(
            TestExecutor::poll_until_stalled(&mut next_task_2).await,
            Poll::Ready(Some(Ok(fta::NotifierRequest::Notify { alarm_id, .. }))) if alarm_id == ALARM_ID_2
        );

        // One-shot alarms: the notifier channels close after firing.
        assert_matches!(
            TestExecutor::poll_until_stalled(notifier_1.next()).await,
            Poll::Ready(None)
        );
        assert_matches!(
            TestExecutor::poll_until_stalled(notifier_2.next()).await,
            Poll::Ready(None)
        );
    }
2396
2397 #[test_case(100, 200 ; "push out")]
2398 #[test_case(200, 100 ; "pull in")]
2399 #[fuchsia::test(allow_stalls = false)]
2400 async fn test_two_alarms_different(
2401 first_deadline_nanos: i64,
2403 second_deadline_nanos: i64,
2405 ) {
2406 let ctx = TestContext::new().await;
2407
2408 let mut set_task_1 = ctx.wake_proxy.set_and_wait(
2409 fidl::BootInstant::from_nanos(first_deadline_nanos),
2410 fta::SetMode::KeepAlive(fake_wake_lease()),
2411 "Hello1".into(),
2412 );
2413 let mut set_task_2 = ctx.wake_proxy.set_and_wait(
2414 fidl::BootInstant::from_nanos(second_deadline_nanos),
2415 fta::SetMode::KeepAlive(fake_wake_lease()),
2416 "Hello2".into(),
2417 );
2418
2419 assert_matches!(TestExecutor::poll_until_stalled(&mut set_task_1).await, Poll::Pending);
2420 assert_matches!(TestExecutor::poll_until_stalled(&mut set_task_2).await, Poll::Pending);
2421
2422 let mut tasks = [(first_deadline_nanos, set_task_1), (second_deadline_nanos, set_task_2)];
2424 tasks.sort_by(|a, b| a.0.cmp(&b.0));
2425 let [mut first_task, mut second_task] = tasks;
2426
2427 TestExecutor::advance_to(fasync::MonotonicInstant::from_nanos(first_task.0)).await;
2429 assert_matches!(
2430 TestExecutor::poll_until_stalled(&mut first_task.1).await,
2431 Poll::Ready(Ok(Ok(_)))
2432 );
2433 assert_matches!(TestExecutor::poll_until_stalled(&mut second_task.1).await, Poll::Pending);
2434
2435 TestExecutor::advance_to(fasync::MonotonicInstant::from_nanos(second_task.0)).await;
2436 assert_matches!(
2437 TestExecutor::poll_until_stalled(&mut second_task.1).await,
2438 Poll::Ready(Ok(Ok(_)))
2439 );
2440 }
2441
    /// Notifier-protocol variant of `test_two_alarms_different`: two alarms
    /// with distinct deadlines must produce notifications in deadline order,
    /// regardless of scheduling order, after which the streams terminate.
    #[test_case(100, 200 ; "push out")]
    #[test_case(200, 100 ; "pull in")]
    #[fuchsia::test(allow_stalls = false)]
    async fn test_two_alarms_different_notify(
        first_deadline_nanos: i64,
        second_deadline_nanos: i64,
    ) {
        const ALARM_ID_1: &str = "Hello1";
        const ALARM_ID_2: &str = "Hello2";

        let ctx = TestContext::new().await;

        // Schedules one alarm and returns the server end of its Notifier
        // stream.
        let schedule = async |deadline_nanos: i64, alarm_id: &str| {
            let (notifier_client, notifier_stream) =
                fidl::endpoints::create_request_stream::<fta::NotifierMarker>();
            assert_matches!(
                ctx.wake_proxy
                    .set(
                        notifier_client,
                        fidl::BootInstant::from_nanos(deadline_nanos),
                        fta::SetMode::KeepAlive(fake_wake_lease()),
                        alarm_id,
                    )
                    .await,
                Ok(Ok(()))
            );
            notifier_stream
        };

        // Merge both notifier streams so notifications can be observed from a
        // single stream in arrival order.
        let mut notifier_all = futures::stream::select_all([
            schedule(first_deadline_nanos, ALARM_ID_1).await,
            schedule(second_deadline_nanos, ALARM_ID_2).await,
        ]);
        // Sort the (deadline, id) pairs so the assertions below are phrased
        // in deadline order, independent of the test-case parameter order.
        let [(early_ns, early_alarm), (later_ns, later_alarm)] = {
            let mut tasks =
                [(first_deadline_nanos, ALARM_ID_1), (second_deadline_nanos, ALARM_ID_2)];
            tasks.sort_by(|a, b| a.0.cmp(&b.0));
            tasks
        };

        // No notification before the earlier deadline.
        let mut next_task = notifier_all.next();
        assert_matches!(TestExecutor::poll_until_stalled(&mut next_task).await, Poll::Pending);

        // The earlier alarm fires first.
        TestExecutor::advance_to(fasync::MonotonicInstant::from_nanos(early_ns)).await;
        assert_matches!(
            TestExecutor::poll_until_stalled(next_task).await,
            Poll::Ready(Some(Ok(fta::NotifierRequest::Notify { alarm_id, .. }))) if alarm_id == early_alarm
        );

        // The later alarm stays pending until its own deadline.
        let mut next_task = notifier_all.next();
        assert_matches!(TestExecutor::poll_until_stalled(&mut next_task).await, Poll::Pending);

        TestExecutor::advance_to(fasync::MonotonicInstant::from_nanos(later_ns)).await;
        assert_matches!(
            TestExecutor::poll_until_stalled(next_task).await,
            Poll::Ready(Some(Ok(fta::NotifierRequest::Notify { alarm_id, .. }))) if alarm_id == later_alarm
        );
        // Both alarms delivered; the merged stream is now terminated.
        assert_matches!(
            TestExecutor::poll_until_stalled(notifier_all.next()).await,
            Poll::Ready(None)
        );
    }
2508
2509 #[fuchsia::test(allow_stalls = false)]
2510 async fn test_alarm_immediate() {
2511 let ctx = TestContext::new().await;
2512 let mut set_task = ctx.wake_proxy.set_and_wait(
2513 fidl::BootInstant::INFINITE_PAST,
2514 fta::SetMode::KeepAlive(fake_wake_lease()),
2515 "Hello1".into(),
2516 );
2517 assert_matches!(
2518 TestExecutor::poll_until_stalled(&mut set_task).await,
2519 Poll::Ready(Ok(Ok(_)))
2520 );
2521 }
2522
2523 #[fuchsia::test(allow_stalls = false)]
2524 async fn test_alarm_immediate_notify() {
2525 const ALARM_ID: &str = "Hello";
2526 let ctx = TestContext::new().await;
2527
2528 let (notifier_client, mut notifier_stream) =
2529 fidl::endpoints::create_request_stream::<fta::NotifierMarker>();
2530
2531 let mut set_task = ctx.wake_proxy.set(
2532 notifier_client,
2533 fidl::BootInstant::INFINITE_PAST,
2534 fta::SetMode::KeepAlive(fake_wake_lease()),
2535 ALARM_ID,
2536 );
2537 assert_matches!(
2538 TestExecutor::poll_until_stalled(&mut set_task).await,
2539 Poll::Ready(Ok(Ok(_)))
2540 );
2541 assert_matches!(
2542 TestExecutor::poll_until_stalled(notifier_stream.next()).await,
2543 Poll::Ready(Some(Ok(fta::NotifierRequest::Notify { alarm_id, .. }))) if alarm_id == ALARM_ID
2544 );
2545 }
2546
    /// Scheduling a second alarm under the same alarm ID must cancel the
    /// first with `Dropped`, and the replacement must fire exactly at its own
    /// (override) deadline — whether that pulls the deadline in, pushes it
    /// out, or leaves it unchanged. Also verifies the Inspect state after the
    /// alarm fires.
    #[test_case(200, 100 ; "pull in")]
    #[test_case(100, 200 ; "push out")]
    #[test_case(100, 100 ; "replace with the same deadline")]
    #[fuchsia::test(allow_stalls = false)]
    async fn test_reschedule(initial_deadline_nanos: i64, override_deadline_nanos: i64) {
        const ALARM_ID: &str = "Hello";

        let ctx = TestContext::new().await;

        // Schedules an alarm and returns (wait future, setup-done event).
        // `NotifySetupDone` signals the event once the alarm is fully
        // registered, letting the test sequence the two schedules reliably.
        let schedule = |deadline_nanos: i64| {
            let setup_done = zx::Event::create();
            let task = ctx.wake_proxy.set_and_wait(
                fidl::BootInstant::from_nanos(deadline_nanos),
                fta::SetMode::NotifySetupDone(
                    setup_done.duplicate_handle(zx::Rights::SAME_RIGHTS).unwrap(),
                ),
                ALARM_ID.into(),
            );
            (task, setup_done)
        };

        let (mut set_task_1, setup_done_1) = schedule(initial_deadline_nanos);
        fasync::OnSignals::new(setup_done_1, zx::Signals::EVENT_SIGNALED).await.unwrap();
        assert_matches!(TestExecutor::poll_until_stalled(&mut set_task_1).await, Poll::Pending);

        // Re-scheduling with the same ID drops the first alarm...
        let (mut set_task_2, setup_done_2) = schedule(override_deadline_nanos);
        fasync::OnSignals::new(setup_done_2, zx::Signals::EVENT_SIGNALED).await.unwrap();
        assert_matches!(
            TestExecutor::poll_until_stalled(&mut set_task_1).await,
            Poll::Ready(Ok(Err(fta::WakeAlarmsError::Dropped)))
        );
        assert_matches!(TestExecutor::poll_until_stalled(&mut set_task_2).await, Poll::Pending);

        // ...and the replacement fires at the override deadline, not one
        // nanosecond earlier.
        TestExecutor::advance_to(fasync::MonotonicInstant::from_nanos(override_deadline_nanos - 1))
            .await;
        assert_matches!(TestExecutor::poll_until_stalled(&mut set_task_2).await, Poll::Pending);
        TestExecutor::advance_to(fasync::MonotonicInstant::from_nanos(override_deadline_nanos))
            .await;
        assert_matches!(
            TestExecutor::poll_until_stalled(&mut set_task_2).await,
            Poll::Ready(Ok(Ok(_)))
        );

        // After the alarm fires, the Inspect hierarchy must show no pending
        // timers and no hardware deadline.
        assert_data_tree!(finspect::component::inspector(), root: {
            test: {
                hardware: {
                    current_deadline: "(none)",
                    remaining_until_alarm: "(none)",
                },
                now_formatted: format!("{override_deadline_nanos}ns ({override_deadline_nanos})"),
                now_ns: override_deadline_nanos,
                pending_timers: "Boot:\n\t\n\tUTC:\n\t",
                pending_timers_count: 0u64,
                requested_deadlines_ns: AnyProperty,
                schedule_delay_ns: AnyProperty,
                slack_ns: AnyProperty,
                boot_deadlines_count: AnyProperty,
                utc_deadlines_count: AnyProperty,
                debug_node: contains {},
            },
        });
    }
2619
    /// Notifier-protocol variant of `test_reschedule`: replacing an alarm
    /// that has the same alarm ID must deliver `NotifyError(Dropped)` on the
    /// first notifier, while the replacement fires normally at the override
    /// deadline on the second.
    #[fuchsia::test(allow_stalls = false)]
    async fn test_reschedule_notify() {
        const ALARM_ID: &str = "Hello";
        const INITIAL_DEADLINE_NANOS: i64 = 100;
        const OVERRIDE_DEADLINE_NANOS: i64 = 200;

        let ctx = TestContext::new().await;

        // Schedules an alarm under ALARM_ID and returns the server end of its
        // Notifier stream.
        let schedule = async |deadline_nanos: i64| {
            let (notifier_client, notifier_stream) =
                fidl::endpoints::create_request_stream::<fta::NotifierMarker>();
            assert_matches!(
                ctx.wake_proxy
                    .set(
                        notifier_client,
                        fidl::BootInstant::from_nanos(deadline_nanos),
                        fta::SetMode::KeepAlive(fake_wake_lease()),
                        ALARM_ID.into(),
                    )
                    .await,
                Ok(Ok(()))
            );
            notifier_stream
        };

        let mut notifier_1 = schedule(INITIAL_DEADLINE_NANOS).await;
        let mut next_task_1 = notifier_1.next();
        assert_matches!(TestExecutor::poll_until_stalled(&mut next_task_1).await, Poll::Pending);

        // Scheduling a second alarm with the same ID supersedes the first.
        let mut notifier_2 = schedule(OVERRIDE_DEADLINE_NANOS).await;
        let mut next_task_2 = notifier_2.next();
        assert_matches!(TestExecutor::poll_until_stalled(&mut next_task_2).await, Poll::Pending);

        // The first notifier reports the drop, then its stream terminates.
        assert_matches!(
            TestExecutor::poll_until_stalled(&mut next_task_1).await,
            Poll::Ready(Some(Ok(fta::NotifierRequest::NotifyError { alarm_id, error, .. }))) if alarm_id == ALARM_ID && error == fta::WakeAlarmsError::Dropped
        );
        assert_matches!(
            TestExecutor::poll_until_stalled(notifier_1.next()).await,
            Poll::Ready(None)
        );

        // The replacement fires at the override deadline, and its stream then
        // terminates as well.
        TestExecutor::advance_to(fasync::MonotonicInstant::from_nanos(OVERRIDE_DEADLINE_NANOS))
            .await;
        assert_matches!(
            TestExecutor::poll_until_stalled(next_task_2).await,
            Poll::Ready(Some(Ok(fta::NotifierRequest::Notify { alarm_id, .. }))) if alarm_id == ALARM_ID
        );
        assert_matches!(
            TestExecutor::poll_until_stalled(notifier_2.next()).await,
            Poll::Ready(None)
        );
    }
2677
2678 #[fuchsia::test(allow_stalls = false)]
2681 async fn test_fidl_error_on_reschedule() {
2682 const DEADLINE_NANOS: i64 = 100;
2683
2684 let (wake_proxy, _stream) =
2685 fidl::endpoints::create_proxy_and_stream::<fta::WakeAlarmsMarker>();
2686 drop(_stream);
2687
2688 assert_matches!(
2689 wake_proxy
2690 .set_and_wait(
2691 zx::BootInstant::from_nanos(DEADLINE_NANOS).into(),
2692 fta::SetMode::KeepAlive(fake_wake_lease()),
2693 "hello1".into(),
2694 )
2695 .await,
2696 Err(fidl::Error::ClientChannelClosed { .. })
2697 );
2698
2699 assert_matches!(
2700 wake_proxy
2701 .set_and_wait(
2702 zx::BootInstant::from_nanos(DEADLINE_NANOS).into(),
2703 fta::SetMode::KeepAlive(fake_wake_lease()),
2704 "hello2".into(),
2705 )
2706 .await,
2707 Err(fidl::Error::ClientChannelClosed { .. })
2708 );
2709 }
2710
    /// A UTC-deadline alarm must stay pending while the UTC clock sits at its
    /// backstop (before the requested timestamp), and must fire once a clock
    /// update moves UTC past the deadline and the executor is allowed to run.
    #[fuchsia::test(allow_stalls = false)]
    async fn test_set_and_wait_utc() {
        const ALARM_ID: &str = "Hello_set_and_wait_utc";
        let ctx = TestContext::new().await;

        // Start the UTC clock at its backstop, anchored to current boot time.
        let now_boot = fasync::BootInstant::now();
        ctx.utc_clock
            .update(
                zx::ClockUpdate::builder()
                    .absolute_value(now_boot.into(), ctx.utc_backstop)
                    .build(),
            )
            .unwrap();

        // Request an alarm 2ns past the backstop — still in the future here.
        let timestamp_utc = ctx.utc_backstop + fxr::UtcDuration::from_nanos(2);
        let mut wake_fut = ctx.wake_proxy.set_and_wait_utc(
            &fta::InstantUtc { timestamp_utc: timestamp_utc.into_nanos() },
            fta::SetMode::KeepAlive(fake_wake_lease()),
            ALARM_ID,
        );

        assert_matches!(TestExecutor::poll_until_stalled(&mut wake_fut).await, Poll::Pending);

        // Jump UTC to backstop + 100ns, which is past the alarm's deadline.
        ctx.utc_clock
            .update(
                zx::ClockUpdate::builder()
                    .absolute_value(
                        now_boot.into(),
                        ctx.utc_backstop + fxr::UtcDuration::from_nanos(100),
                    )
                    .build(),
            )
            .unwrap();

        // Advance fake time so the alarm machinery can react to the clock
        // update; the wait future must now complete.
        TestExecutor::advance_to(fasync::MonotonicInstant::from_nanos(1)).await;
        assert_matches!(TestExecutor::poll_until_stalled(wake_fut).await, Poll::Ready(_));
    }
2753}