diagnostics/task_metrics/task_info.rs

// Copyright 2021 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

use crate::task_metrics::constants::{COMPONENT_CPU_MAX_SAMPLES, CPU_SAMPLE_PERIOD};
use crate::task_metrics::measurement::{Measurement, MeasurementsQueue};
use crate::task_metrics::runtime_stats_source::RuntimeStatsSource;
use fuchsia_async as fasync;
use fuchsia_inspect::{self as inspect, HistogramProperty, UintLinearHistogramProperty};
use fuchsia_sync::Mutex;
use injectable_time::TimeSource;
use log::debug;
use moniker::ExtendedMoniker;
use std::fmt::Debug;
use std::sync::{Arc, Weak};
use zx::sys::{self as zx_sys, zx_system_get_num_cpus};

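/// Creates a per-component CPU usage histogram under `node`, keyed by the
/// component's moniker. The buckets cover 1%..=99% in 1% steps; Inspect adds
/// implicit underflow (0%) and overflow (>=100%) buckets around them.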
pub(crate) fn create_cpu_histogram(
    node: &inspect::Node,
    moniker: &ExtendedMoniker,
) -> inspect::UintLinearHistogramProperty {
    node.create_uint_linear_histogram(
        moniker.to_string(),
        inspect::LinearHistogramParams { floor: 1, step_size: 1, buckets: 99 },
    )
}

fn num_cpus() -> i64 {
    // zx_system_get_num_cpus() is FFI to C++. It simply returns a value from a static struct
    // so it should always be safe to call.
    (unsafe { zx_system_get_num_cpus() }) as i64
}

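/// Lifecycle of a measured task. A task starts `Alive`, moves to `Terminated`
/// when the TASK_TERMINATED signal fires (one final measurement is still
/// pending), and ends at `TerminatedAndMeasured` once that final, post-mortem
/// measurement has been taken.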
#[derive(Debug)]
pub(crate) enum TaskState<T: RuntimeStatsSource + Debug> {
    TerminatedAndMeasured,
    Terminated(T),
    Alive(T),
}

impl<T> From<T> for TaskState<T>
where
    T: RuntimeStatsSource + Debug,
{
    fn from(task: T) -> TaskState<T> {
        TaskState::Alive(task)
    }
}

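/// Tracks CPU usage for a single task, identified by its koid. Holds the
/// rolling queue of measurements, an optional per-component CPU histogram, and
/// weak references to child tasks whose usage is subtracted from this task's
/// own measurements.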
#[derive(Debug)]
pub struct TaskInfo<T: RuntimeStatsSource + Debug> {
    koid: zx_sys::zx_koid_t,
    pub(crate) task: Arc<Mutex<TaskState<T>>>,
    pub(crate) time_source: Arc<dyn TimeSource + Sync + Send>,
    pub(crate) has_parent_task: bool,
    pub(crate) measurements: MeasurementsQueue,
    exited_cpu: Option<Measurement>,
    histogram: Option<UintLinearHistogramProperty>,
    previous_cpu: zx::MonotonicDuration,
    previous_histogram_timestamp: i64,
    cpu_cores: i64,
    sample_period: std::time::Duration,
    children: Vec<Weak<Mutex<TaskInfo<T>>>>,
    _terminated_task: fasync::Task<()>,
    pub(crate) most_recent_measurement_nanos: Arc<Mutex<Option<i64>>>,
}

impl<T: 'static + RuntimeStatsSource + Debug + Send + Sync> TaskInfo<T> {
    /// Creates a new `TaskInfo` from the given cpu stats provider.
    // Due to https://github.com/rust-lang/rust/issues/50133 we cannot just derive TryFrom on a
    // generic type given a collision with the blanket implementation.
    pub fn try_from(
        task: T,
        histogram: Option<UintLinearHistogramProperty>,
        time_source: Arc<dyn TimeSource + Sync + Send>,
    ) -> Result<Self, zx::Status> {
        Self::try_from_internal(task, histogram, time_source, CPU_SAMPLE_PERIOD, num_cpus())
    }
}

impl<T: 'static + RuntimeStatsSource + Debug + Send + Sync> TaskInfo<T> {
    // Like `try_from`, but takes the sample period and CPU core count as
    // parameters so tests can inject them.
    fn try_from_internal(
        task: T,
        histogram: Option<UintLinearHistogramProperty>,
        time_source: Arc<dyn TimeSource + Sync + Send>,
        sample_period: std::time::Duration,
        cpu_cores: i64,
    ) -> Result<Self, zx::Status> {
        let koid = task.koid()?;
        let maybe_handle = task.handle_ref().duplicate(zx::Rights::SAME_RIGHTS).ok();
        let task_state = Arc::new(Mutex::new(TaskState::from(task)));
        let weak_task_state = Arc::downgrade(&task_state);
        let most_recent_measurement_nanos = Arc::new(Mutex::new(None));
        let movable_most_recent_measurement_nanos = most_recent_measurement_nanos.clone();
        let movable_time_source = time_source.clone();
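        // Watch for termination in the background: when TASK_TERMINATED fires (or
        // the handle could not be duplicated), record the time and move the state
        // from Alive to Terminated so the next measurement pass can take the final,
        // post-mortem sample.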
        let _terminated_task = fasync::Task::spawn(async move {
            if let Some(handle) = maybe_handle {
                fasync::OnSignals::new(&handle, zx::Signals::TASK_TERMINATED)
                    .await
                    .map(|_: fidl::Signals| ()) // Discard.
                    .unwrap_or_else(|error| debug!(error:%; "error creating signal handler"));
            }

            // If we failed to duplicate the handle then still mark this task as terminated to
            // ensure it's cleaned up.
            if let Some(task_state) = weak_task_state.upgrade() {
                {
                    let mut terminated_at_nanos_guard =
                        movable_most_recent_measurement_nanos.lock();
                    *terminated_at_nanos_guard = Some(movable_time_source.now());
                }
                let mut state = task_state.lock();
                *state = match std::mem::replace(&mut *state, TaskState::TerminatedAndMeasured) {
                    s @ TaskState::TerminatedAndMeasured => s,
                    TaskState::Alive(t) => TaskState::Terminated(t),
                    s @ TaskState::Terminated(_) => s,
                };
            }
        });
        Ok(Self {
            koid,
            task: task_state,
            has_parent_task: false,
            measurements: MeasurementsQueue::new(COMPONENT_CPU_MAX_SAMPLES, time_source.clone()),
            children: vec![],
            cpu_cores,
            sample_period,
            histogram,
            previous_cpu: zx::MonotonicDuration::from_nanos(0),
            previous_histogram_timestamp: time_source.now(),
            time_source,
            _terminated_task,
            most_recent_measurement_nanos,
            exited_cpu: None,
        })
    }

    /// Takes a new measurement. If this task's handle is invalid, it still tracks
    /// how many measurements would have been taken; once the maximum number allowed
    /// is reached, the oldest measurement is dropped.
    pub fn measure_if_no_parent(&mut self) -> Option<&Measurement> {
        // Tasks with a parent are measured by that parent via the internal
        // `measure_subtree` below.
        if self.has_parent_task {
            return None;
        }

        self.measure_subtree()
    }

    /// Adds a weak pointer to a task for which this task is the parent.
    pub fn add_child(&mut self, task: Weak<Mutex<TaskInfo<T>>>) {
        self.children.push(task);
    }

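    /// Returns the time of the most recent measurement taken around this task's
    /// termination, if the task has terminated.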
    pub fn most_recent_measurement(&self) -> Option<zx::BootInstant> {
        self.most_recent_measurement_nanos.lock().map(|t| zx::BootInstant::from_nanos(t))
    }

    /// Takes the MeasurementsQueue from this task, replacing it with an empty one.
    /// This function is only valid when `self.task == TaskState::Terminated*`.
    /// The task will be considered stale after this function runs.
    pub fn take_measurements_queue(&mut self) -> Result<MeasurementsQueue, ()> {
        match &*self.task.lock() {
            TaskState::TerminatedAndMeasured | TaskState::Terminated(_) => Ok(std::mem::replace(
                &mut self.measurements,
                MeasurementsQueue::new(COMPONENT_CPU_MAX_SAMPLES, self.time_source.clone()),
            )),
            _ => Err(()),
        }
    }

    /// Takes a zero-valued measurement at timestamp `t`.
    ///
    /// Specifically meant for the very first measurement taken.
    pub fn record_measurement_with_start_time(&mut self, t: zx::BootInstant) {
        self.record_measurement(Measurement::empty(t));
    }

    fn record_measurement(&mut self, m: Measurement) {
        let current_cpu = *m.cpu_time();
        self.add_to_histogram(current_cpu - self.previous_cpu, *m.timestamp());
        self.previous_cpu = current_cpu;
        self.measurements.insert(m);
    }

    fn measure_subtree<'a>(&'a mut self) -> Option<&'a Measurement> {
        let (task_terminated_can_measure, runtime_info_res) = {
            let mut guard = self.task.lock();
            match &*guard {
                TaskState::TerminatedAndMeasured => {
                    self.measurements.insert_post_invalidation();
                    return None;
                }
                TaskState::Terminated(task) => {
                    let result = task.get_runtime_info();
                    *guard = TaskState::TerminatedAndMeasured;
                    let mut terminated_at_nanos_guard = self.most_recent_measurement_nanos.lock();
                    *terminated_at_nanos_guard = Some(self.time_source.now());
                    (true, result)
                }
                TaskState::Alive(task) => (false, task.get_runtime_info()),
            }
        };
        if let Ok(runtime_info) = runtime_info_res {
            let mut measurement = Measurement::from_runtime_info(
                runtime_info,
                zx::BootInstant::from_nanos(self.time_source.now()),
            );
            // Subtract all child measurements.
            let mut alive_children = vec![];
            while let Some(weak_child) = self.children.pop() {
                if let Some(child) = weak_child.upgrade() {
                    let mut child_guard = child.lock();
                    if let Some(child_measurement) = child_guard.measure_subtree() {
                        measurement -= child_measurement;
                    }
                    if child_guard.is_alive() {
                        alive_children.push(weak_child);
                    }
                }
            }
            self.children = alive_children;
            self.record_measurement(measurement);

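            // The final reading of a task that just became TerminatedAndMeasured is
            // preserved in `exited_cpu`; returning `None` tells callers there is no
            // live measurement to report.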
            if task_terminated_can_measure {
                self.exited_cpu = self.measurements.most_recent_measurement().cloned();
                return None;
            }
            return self.measurements.most_recent_measurement();
        }
        None
    }

    // Add a measurement to this task's histogram.
    fn add_to_histogram(
        &mut self,
        cpu_time_delta: zx::MonotonicDuration,
        timestamp: zx::BootInstant,
    ) {
        if let Some(histogram) = &self.histogram {
            let time_value: i64 = timestamp.into_nanos();
            let elapsed_time = time_value - self.previous_histogram_timestamp;
            self.previous_histogram_timestamp = time_value;
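            // Samples taken after less than 90% of the sample period are discarded;
            // the interval is too short to yield a meaningful CPU percentage.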
            if elapsed_time < ((self.sample_period.as_nanos() as i64) * 9 / 10) {
                return;
            }
            let available_core_time = elapsed_time * self.cpu_cores;
            if available_core_time != 0 {
                // Multiply by 100 to get percent. Add available_core_time-1 to compute ceil().
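                // E.g. 500ns of CPU over a 1000ns interval on one core:
                // (500 * 100 + 999) / 1000 = 50, i.e. the 50% bucket.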
                let cpu_numerator =
                    (cpu_time_delta.into_nanos() as i64) * 100 + available_core_time - 1;
                histogram.insert((cpu_numerator / available_core_time) as u64);
            }
        }
    }

    /// A task is considered alive while it has not yet reached the
    /// `TerminatedAndMeasured` state, or while at least one real measurement is
    /// still saved.
    pub fn is_alive(&self) -> bool {
        let task_state_terminated_and_measured =
            matches!(*self.task.lock(), TaskState::TerminatedAndMeasured);
        let task_has_real_measurements = !self.measurements.no_true_measurements();

        !task_state_terminated_and_measured || task_has_real_measurements
    }

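    /// Returns the final measurement taken after this task terminated, if any.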
    pub fn exited_cpu(&self) -> Option<&Measurement> {
        self.exited_cpu.as_ref()
    }

    /// Writes the task measurements under the given inspect node `parent`.
    pub fn record_to_node(&self, parent: &inspect::Node) {
        let node = parent.create_child(self.koid.to_string());
        self.measurements.record_to_node(&node);
        parent.record(node);
    }

    pub fn koid(&self) -> zx_sys::zx_koid_t {
        self.koid
    }

    #[cfg(test)]
    pub fn total_measurements(&self) -> usize {
        self.measurements.true_measurement_count()
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::task_metrics::testing::FakeTask;
    use assert_matches::assert_matches;
    use diagnostics_assertions::assert_data_tree;
    use diagnostics_hierarchy::{
        ArrayContent, DiagnosticsHierarchyGetter, LinearHistogram, Property,
    };
    use injectable_time::FakeTime;

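    // Takes one measurement and then advances the fake clock by a full sample
    // period, simulating one regular sampling tick.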
    fn take_measurement_then_tick_clock<
        'a,
        T: 'static + RuntimeStatsSource + Debug + Send + Sync,
    >(
        ti: &'a mut TaskInfo<T>,
        clock: &Arc<FakeTime>,
    ) -> Option<&'a Measurement> {
        let m = ti.measure_if_no_parent();
        clock.add_ticks(CPU_SAMPLE_PERIOD.as_nanos() as i64);
        m
    }

    #[fuchsia::test]
    async fn rotates_measurements_per_task() {
        // TODO(https://fxbug.dev/462815022) remove once deadlocks addressed
        fuchsia_sync::suppress_lock_cycle_panics();

        // Set up test.
        let clock = Arc::new(FakeTime::new());
        let mut task: TaskInfo<FakeTask> =
            TaskInfo::try_from(FakeTask::default(), None /* histogram */, clock.clone()).unwrap();
        assert!(task.is_alive());

        // Take three measurements.
        take_measurement_then_tick_clock(&mut task, &clock);
        assert_eq!(task.measurements.true_measurement_count(), 1);
        take_measurement_then_tick_clock(&mut task, &clock);
        assert_eq!(task.measurements.true_measurement_count(), 2);
        take_measurement_then_tick_clock(&mut task, &clock);
        assert!(task.is_alive());
        assert_eq!(task.measurements.true_measurement_count(), 3);

        // Terminate the task.
        task.force_terminate().await;

        // This will perform the post-termination measurement and bring the state to terminated and
        // measured.
        take_measurement_then_tick_clock(&mut task, &clock);
        assert_eq!(task.measurements.true_measurement_count(), 4);
        assert_matches!(*task.task.lock(), TaskState::TerminatedAndMeasured);

        for _ in 4..COMPONENT_CPU_MAX_SAMPLES {
            take_measurement_then_tick_clock(&mut task, &clock);
            assert_eq!(task.measurements.true_measurement_count(), 4);
        }

        take_measurement_then_tick_clock(&mut task, &clock); // 1 dropped, 3 left
        assert!(task.is_alive());
        assert_eq!(task.measurements.true_measurement_count(), 3);
        take_measurement_then_tick_clock(&mut task, &clock); // 2 dropped, 2 left
        assert!(task.is_alive());
        assert_eq!(task.measurements.true_measurement_count(), 2);
        take_measurement_then_tick_clock(&mut task, &clock); // 3 dropped, 1 left
        assert!(task.is_alive());
        assert_eq!(task.measurements.true_measurement_count(), 1);

        // Take one last measurement.
        take_measurement_then_tick_clock(&mut task, &clock); // 4 dropped, 0 left
        assert!(!task.is_alive());
        assert_eq!(task.measurements.true_measurement_count(), 0);
    }

    #[fuchsia::test]
    async fn write_inspect() {
        let time = Arc::new(FakeTime::new());
        let mut task = TaskInfo::try_from(
            FakeTask::new(
                1,
                vec![
                    zx::TaskRuntimeInfo {
                        cpu_time: 2,
                        queue_time: 4,
                        ..zx::TaskRuntimeInfo::default()
                    },
                    zx::TaskRuntimeInfo {
                        cpu_time: 6,
                        queue_time: 8,
                        ..zx::TaskRuntimeInfo::default()
                    },
                ],
            ),
            None, /* histogram */
            time.clone(),
        )
        .unwrap();

        time.set_ticks(1);
        task.measure_if_no_parent();
        time.set_ticks(2);
        task.measure_if_no_parent();

        let inspector = inspect::Inspector::default();
        task.record_to_node(inspector.root());
        assert_data_tree!(inspector, root: {
            "1": {
                timestamps: vec![1i64, 2],
                cpu_times: vec![2i64, 6],
                queue_times: vec![4i64, 8],
            }
        });
    }

    #[fuchsia::test]
    async fn write_more_than_max_samples() {
        let inspector = inspect::Inspector::default();
        let clock = Arc::new(FakeTime::new());
        let mut task = TaskInfo::try_from(
            FakeTask::new(
                1,
                vec![
                    zx::TaskRuntimeInfo {
                        cpu_time: 2,
                        queue_time: 4,
                        ..zx::TaskRuntimeInfo::default()
                    },
                    zx::TaskRuntimeInfo {
                        cpu_time: 6,
                        queue_time: 8,
                        ..zx::TaskRuntimeInfo::default()
                    },
                ],
            ),
            None, /* histogram */
            clock.clone(),
        )
        .unwrap();

        for _ in 0..(COMPONENT_CPU_MAX_SAMPLES + 10) {
            assert!(take_measurement_then_tick_clock(&mut task, &clock).is_some());
        }

        assert_eq!(task.measurements.true_measurement_count(), COMPONENT_CPU_MAX_SAMPLES);
        task.record_to_node(inspector.root());
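        // Guard assertion: the literal 60 below is only meaningful while
        // COMPONENT_CPU_MAX_SAMPLES is 60.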
        assert_eq!(60, COMPONENT_CPU_MAX_SAMPLES);
        assert_eq!(task.measurements.true_measurement_count(), 60);

        let hierarchy = inspector.get_diagnostics_hierarchy().await;
        for top_level in &hierarchy.children {
            let child = hierarchy.get_child(&top_level.name).unwrap();
            let timestamps = child.get_property("timestamps").unwrap().int_array().unwrap();
            assert_eq!(timestamps.len(), COMPONENT_CPU_MAX_SAMPLES);
            let cpu_times = child.get_property("cpu_times").unwrap().int_array().unwrap();
            assert_eq!(cpu_times.len(), COMPONENT_CPU_MAX_SAMPLES);
            let queue_times = child.get_property("queue_times").unwrap().int_array().unwrap();
            assert_eq!(queue_times.len(), COMPONENT_CPU_MAX_SAMPLES);
        }
    }

    #[fuchsia::test]
    async fn more_than_max_samples_offset_time() {
        let inspector = inspect::Inspector::default();
        let clock = Arc::new(FakeTime::new());
        let mut task = TaskInfo::try_from(
            FakeTask::new(
                1,
                vec![
                    zx::TaskRuntimeInfo {
                        cpu_time: 2,
                        queue_time: 4,
                        ..zx::TaskRuntimeInfo::default()
                    },
                    zx::TaskRuntimeInfo {
                        cpu_time: 6,
                        queue_time: 8,
                        ..zx::TaskRuntimeInfo::default()
                    },
                ],
            ),
            None, /* histogram */
            clock.clone(),
        )
        .unwrap();

        for _ in 0..COMPONENT_CPU_MAX_SAMPLES {
            assert!(take_measurement_then_tick_clock(&mut task, &clock).is_some());
        }

        task.measure_if_no_parent();

        // This creates more than COMPONENT_CPU_MAX_SAMPLES measurements within the
        // maximum duration of one hour, but it should still cause eviction.
        clock.add_ticks((CPU_SAMPLE_PERIOD - std::time::Duration::from_secs(1)).as_nanos() as i64);
        task.measure_if_no_parent();

        assert_eq!(task.measurements.true_measurement_count(), COMPONENT_CPU_MAX_SAMPLES);
        task.record_to_node(inspector.root());
    }

    #[fuchsia::test]
    async fn measure_with_children() {
        let clock = Arc::new(FakeTime::new());
        let mut task = TaskInfo::try_from(
            FakeTask::new(
                1,
                vec![
                    zx::TaskRuntimeInfo {
                        cpu_time: 100,
                        queue_time: 200,
                        ..zx::TaskRuntimeInfo::default()
                    },
                    zx::TaskRuntimeInfo {
                        cpu_time: 300,
                        queue_time: 400,
                        ..zx::TaskRuntimeInfo::default()
                    },
                ],
            ),
            None, /* histogram */
            clock.clone(),
        )
        .unwrap();

        let child_1 = Arc::new(Mutex::new(
            TaskInfo::try_from(
                FakeTask::new(
                    2,
                    vec![
                        zx::TaskRuntimeInfo {
                            cpu_time: 10,
                            queue_time: 20,
                            ..zx::TaskRuntimeInfo::default()
                        },
                        zx::TaskRuntimeInfo {
                            cpu_time: 30,
                            queue_time: 40,
                            ..zx::TaskRuntimeInfo::default()
                        },
                    ],
                ),
                None, /* histogram */
                clock.clone(),
            )
            .unwrap(),
        ));

        let child_2 = Arc::new(Mutex::new(
            TaskInfo::try_from(
                FakeTask::new(
                    3,
                    vec![
                        zx::TaskRuntimeInfo {
                            cpu_time: 5,
                            queue_time: 2,
                            ..zx::TaskRuntimeInfo::default()
                        },
                        zx::TaskRuntimeInfo {
                            cpu_time: 15,
                            queue_time: 4,
                            ..zx::TaskRuntimeInfo::default()
                        },
                    ],
                ),
                None, /* histogram */
                clock.clone(),
            )
            .unwrap(),
        ));

        task.add_child(Arc::downgrade(&child_1));
        task.add_child(Arc::downgrade(&child_2));

        {
            let measurement = take_measurement_then_tick_clock(&mut task, &clock).unwrap();
            assert_eq!(measurement.cpu_time().into_nanos(), 100 - 10 - 5);
            assert_eq!(measurement.queue_time().into_nanos(), 200 - 20 - 2);
        }
        assert_eq!(child_1.lock().total_measurements(), 1);
        assert_eq!(child_2.lock().total_measurements(), 1);

        // Pretend child 2 is no longer alive; it should be removed during the next
        // measurement pass.
        {
            let mut child_2_guard = child_2.lock();
            child_2_guard.task = Arc::new(Mutex::new(TaskState::TerminatedAndMeasured));
            child_2_guard.measurements =
                MeasurementsQueue::new(COMPONENT_CPU_MAX_SAMPLES, clock.clone());
        }

        assert_eq!(task.children.len(), 2);
        {
            let measurement = take_measurement_then_tick_clock(&mut task, &clock).unwrap();
            assert_eq!(measurement.cpu_time().into_nanos(), 300 - 30);
            assert_eq!(measurement.queue_time().into_nanos(), 400 - 40);
        }

        assert_eq!(task.children.len(), 1); // Dead children are cleaned up after measuring.
        assert_eq!(child_1.lock().total_measurements(), 2);
    }

    type BucketPairs = Vec<(i64, i64)>;

    // Returns a list of <bucket index, count> pairs for linear histogram buckets where count > 0.
    async fn linear_histogram_non_zero_values(inspector: &inspect::Inspector) -> BucketPairs {
        let mut output = vec![];
        let hierarchy = inspector.get_diagnostics_hierarchy().await;
        let histogram = hierarchy.get_property_by_path(&["foo"]).unwrap();
        if let Property::UintArray(_, data) = histogram {
            if let ArrayContent::LinearHistogram(LinearHistogram { counts, indexes, .. }) = data {
                match indexes {
                    None => {
                        for (index, count) in counts.iter().enumerate() {
                            if *count > 0 && *count <= i64::MAX as u64 {
                                output.push((index as i64, *count as i64));
                            }
                        }
                    }
                    Some(indexes) => {
                        for (index, count) in indexes.iter().zip(counts.iter()) {
                            if *count > 0
                                && *count <= i64::MAX as u64
                                && *index <= i64::MAX as usize
                            {
                                output.push((*index as i64, *count as i64));
                            }
                        }
                    }
                }
            }
        }
        output
    }

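    // Builds a FakeTask whose successive readings accumulate the given per-sample
    // CPU deltas.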
    fn fake_readings(id: u64, cpu_deltas: Vec<u64>) -> FakeTask {
        let mut cpu_time = 0i64;
        let mut readings = vec![];
        for delta in cpu_deltas.iter() {
            cpu_time += *delta as i64;
            readings.push(zx::TaskRuntimeInfo { cpu_time, ..zx::TaskRuntimeInfo::default() })
        }
        FakeTask::new(id, readings)
    }

    // Test that the ceil function works: 0 cpu goes in bucket 0, 0.1..1 in bucket 1, etc.
    #[fuchsia::test]
    async fn bucket_cutoffs() {
        let readings = fake_readings(1, vec![1, 0, 500, 989, 990, 991, 999, 0]);
        let inspector = inspect::Inspector::default();
        let clock = FakeTime::new();
        let histogram =
            create_cpu_histogram(&inspector.root(), &ExtendedMoniker::parse_str("foo").unwrap());
        let mut task = TaskInfo::try_from_internal(
            readings,
            Some(histogram),
            Arc::new(clock.clone()),
            std::time::Duration::from_nanos(1000),
            1, /* cores */
        )
        .unwrap();

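        // With a 1000ns sample period and one core, a CPU delta of d nanoseconds
        // lands in bucket ceil(d / 10), i.e. the ceiling of its CPU percentage.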
        clock.add_ticks(1000);
        task.measure_if_no_parent(); // 1
        let answer = vec![(1, 1)];
        assert_eq!(linear_histogram_non_zero_values(&inspector).await, answer);

        clock.add_ticks(1000);
        task.measure_if_no_parent(); // 0
        let answer = vec![(0, 1), (1, 1)];
        assert_eq!(linear_histogram_non_zero_values(&inspector).await, answer);

        clock.add_ticks(1000);
        task.measure_if_no_parent(); // 500
        let answer = vec![(0, 1), (1, 1), (50, 1)];
        assert_eq!(linear_histogram_non_zero_values(&inspector).await, answer);

        clock.add_ticks(1000);
        task.measure_if_no_parent(); // 989
        let answer = vec![(0, 1), (1, 1), (50, 1), (99, 1)];
        assert_eq!(linear_histogram_non_zero_values(&inspector).await, answer);

        clock.add_ticks(1000);
        task.measure_if_no_parent(); // 990
        let answer = vec![(0, 1), (1, 1), (50, 1), (99, 2)];
        assert_eq!(linear_histogram_non_zero_values(&inspector).await, answer);

        clock.add_ticks(1000);
        task.measure_if_no_parent(); // 991
        let answer = vec![(0, 1), (1, 1), (50, 1), (99, 2), (100, 1)];
        assert_eq!(linear_histogram_non_zero_values(&inspector).await, answer);

        clock.add_ticks(1000);
        task.measure_if_no_parent(); // 999
        let answer = vec![(0, 1), (1, 1), (50, 1), (99, 2), (100, 2)];
        assert_eq!(linear_histogram_non_zero_values(&inspector).await, answer);

        clock.add_ticks(1000);
        task.measure_if_no_parent(); // 0...
        let answer = vec![(0, 2), (1, 1), (50, 1), (99, 2), (100, 2)];
        assert_eq!(linear_histogram_non_zero_values(&inspector).await, answer);
    }

    // Test that short time intervals (less than 90% of sample_period) are discarded.
    // Extra-long intervals should be recorded. In all cases, CPU % should be calculated over the
    // actual interval, not the sample_period.
    #[fuchsia::test]
    async fn discard_short_intervals() {
        let readings = fake_readings(1, vec![100, 100, 100, 100]);
        let inspector = inspect::Inspector::default();
        let clock = FakeTime::new();
        let histogram =
            create_cpu_histogram(&inspector.root(), &ExtendedMoniker::parse_str("foo").unwrap());
        let mut task = TaskInfo::try_from_internal(
            readings,
            Some(histogram),
            Arc::new(clock.clone()),
            std::time::Duration::from_nanos(1000),
            1, /* cores */
        )
        .unwrap();

        assert_eq!(linear_histogram_non_zero_values(&inspector).await, vec![]);

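        // 900 ticks is exactly 90% of the period, so the sample is kept:
        // ceil(100 * 100 / 900) = 12, the 12% bucket.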
        clock.add_ticks(900);
        task.measure_if_no_parent();
        assert_eq!(linear_histogram_non_zero_values(&inspector).await, vec![(12, 1)]);

        clock.add_ticks(899);
        task.measure_if_no_parent();
        assert_eq!(linear_histogram_non_zero_values(&inspector).await, vec![(12, 1)]); // No change

        clock.add_ticks(2000);
        task.measure_if_no_parent();
        assert_eq!(linear_histogram_non_zero_values(&inspector).await, vec![(5, 1), (12, 1)]);

        clock.add_ticks(1000);
        task.measure_if_no_parent();
        assert_eq!(
            linear_histogram_non_zero_values(&inspector).await,
            vec![(5, 1), (10, 1), (12, 1)]
        );
    }

    // Test that the CPU% takes the number of cores into account - that is, with N cores
    // the CPU% should be 1/N the amount it would be for 1 core.
    #[fuchsia::test]
    async fn divide_by_cores() {
        let readings = fake_readings(1, vec![400]);
        let inspector = inspect::Inspector::default();
        let clock = FakeTime::new();
        let histogram =
            create_cpu_histogram(&inspector.root(), &ExtendedMoniker::parse_str("foo").unwrap());
        let mut task = TaskInfo::try_from_internal(
            readings,
            Some(histogram),
            Arc::new(clock.clone()),
            std::time::Duration::from_nanos(1000),
            4, /* cores */
        )
        .unwrap();

        assert_eq!(linear_histogram_non_zero_values(&inspector).await, vec![]);

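        // 400ns of CPU over a 1000ns interval on 4 cores:
        // ceil(400 * 100 / 4000) = 10, the 10% bucket.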
        clock.add_ticks(1000);
        task.measure_if_no_parent();
        assert_eq!(linear_histogram_non_zero_values(&inspector).await, vec![(10, 1)]);
    }
}
757}