use stats::bivariate::regression::Slope;
use stats::bivariate::Data;
use stats::univariate::outliers::tukey::LabeledSample;

use estimate::{Distributions, Estimates, Statistic};
use format;
use stats::univariate::Sample;
use stats::Distribution;
use std::cell::Cell;
use std::cmp;
use std::collections::HashSet;
use std::fmt;
use std::io::stdout;
use std::io::Write;
use std::path::{Path, PathBuf};
use Estimate;
use {PlotConfiguration, Plotting, Throughput};

const MAX_DIRECTORY_NAME_LEN: usize = 64;
const MAX_TITLE_LEN: usize = 100;

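/// Results of comparing the current run against the measurements saved from a
/// previous run: the significance test, the relative estimates, and the raw
/// data of the previous run.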
pub(crate) struct ComparisonData {
    pub p_value: f64,
    pub t_distribution: Distribution<f64>,
    pub t_value: f64,
    pub relative_estimates: Estimates,
    pub relative_distributions: Distributions,
    pub significance_threshold: f64,
    pub noise_threshold: f64,
    pub base_iter_counts: Vec<f64>,
    pub base_sample_times: Vec<f64>,
    pub base_avg_times: Vec<f64>,
    pub base_estimates: Estimates,
}

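/// Everything produced by measuring one benchmark: the raw (iteration count,
/// sample time) pairs, the outlier-labeled per-iteration average times, the
/// statistical estimates and their distributions, and, if available, a
/// comparison against the previous run.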
pub(crate) struct MeasurementData<'a> {
    pub data: Data<'a, f64, f64>,
    pub avg_times: LabeledSample<'a, f64>,
    pub absolute_estimates: Estimates,
    pub distributions: Distributions,
    pub comparison: Option<ComparisonData>,
    pub throughput: Option<Throughput>,
}
impl<'a> MeasurementData<'a> {
    pub fn iter_counts(&self) -> &Sample<f64> {
        self.data.x()
    }

    pub fn sample_times(&self) -> &Sample<f64> {
        self.data.y()
    }
}

#[derive(Debug, Clone, Copy, Eq, PartialEq)]
pub enum ValueType {
    Bytes,
    Elements,
    Value,
}

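/// Identifies a single benchmark: the group it belongs to, plus an optional
/// function name, input-value string, and throughput. The full ID, report
/// directory name, and display title are derived once and cached.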
#[derive(Clone, Serialize, Deserialize)]
pub struct BenchmarkId {
    pub group_id: String,
    pub function_id: Option<String>,
    pub value_str: Option<String>,
    pub throughput: Option<Throughput>,
    full_id: String,
    directory_name: String,
    title: String,
}

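/// Truncates `s` to at most `max_len` bytes, backing the cut up as needed so
/// it never lands in the middle of a multi-byte UTF-8 character.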
fn truncate_to_character_boundary(s: &mut String, max_len: usize) {
    let mut boundary = cmp::min(max_len, s.len());
    while !s.is_char_boundary(boundary) {
        boundary -= 1;
    }
    s.truncate(boundary);
}

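/// Replaces characters that are problematic in file names with underscores,
/// lowercases the result on Windows, and truncates it to
/// `MAX_DIRECTORY_NAME_LEN` bytes on a character boundary.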
pub fn make_filename_safe(string: &str) -> String {
    let mut string = string.replace(
        &['?', '"', '/', '\\', '*', '<', '>', ':', '|', '^'][..],
        "_",
    );

    if cfg!(target_os = "windows") {
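        // Windows file systems are case-insensitive, so normalize to lowercase
        // to avoid directory names that differ only by case.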
        string = string.to_lowercase();
    }

    truncate_to_character_boundary(&mut string, MAX_DIRECTORY_NAME_LEN);

    string
}

impl BenchmarkId {
    pub fn new(
        group_id: String,
        function_id: Option<String>,
        value_str: Option<String>,
        throughput: Option<Throughput>,
    ) -> BenchmarkId {
        let full_id = match (&function_id, &value_str) {
            (&Some(ref func), &Some(ref val)) => format!("{}/{}/{}", group_id, func, val),
            (&Some(ref func), &None) => format!("{}/{}", group_id, func),
            (&None, &Some(ref val)) => format!("{}/{}", group_id, val),
            (&None, &None) => group_id.clone(),
        };

        let mut title = full_id.clone();
        truncate_to_character_boundary(&mut title, MAX_TITLE_LEN);
        if title != full_id {
            title.push_str("...");
        }

        let directory_name = match (&function_id, &value_str) {
            (&Some(ref func), &Some(ref val)) => format!(
                "{}/{}/{}",
                make_filename_safe(&group_id),
                make_filename_safe(func),
                make_filename_safe(val)
            ),
            (&Some(ref func), &None) => format!(
                "{}/{}",
                make_filename_safe(&group_id),
                make_filename_safe(func)
            ),
            (&None, &Some(ref val)) => format!(
                "{}/{}",
                make_filename_safe(&group_id),
                make_filename_safe(val)
            ),
            (&None, &None) => make_filename_safe(&group_id),
        };

        BenchmarkId {
            group_id,
            function_id,
            value_str,
            throughput,
            full_id,
            directory_name,
            title,
        }
    }

    pub fn id(&self) -> &str {
        &self.full_id
    }

    pub fn as_title(&self) -> &str {
        &self.title
    }

    pub fn as_directory_name(&self) -> &str {
        &self.directory_name
    }

    pub fn as_number(&self) -> Option<f64> {
        match self.throughput {
            Some(Throughput::Bytes(n)) | Some(Throughput::Elements(n)) => Some(f64::from(n)),
            None => self
                .value_str
                .as_ref()
                .and_then(|string| string.parse::<f64>().ok()),
        }
    }

    pub fn value_type(&self) -> Option<ValueType> {
        match self.throughput {
            Some(Throughput::Bytes(_)) => Some(ValueType::Bytes),
            Some(Throughput::Elements(_)) => Some(ValueType::Elements),
            None => self
                .value_str
                .as_ref()
                .and_then(|string| string.parse::<f64>().ok())
                .map(|_| ValueType::Value),
        }
    }

    pub fn ensure_directory_name_unique(&mut self, existing_directories: &HashSet<String>) {
        if !existing_directories.contains(self.as_directory_name()) {
            return;
        }

        let mut counter = 2;
        loop {
            let new_dir_name = format!("{}_{}", self.as_directory_name(), counter);
            if !existing_directories.contains(&new_dir_name) {
                self.directory_name = new_dir_name;
                return;
            }
            counter += 1;
        }
    }

    pub fn ensure_title_unique(&mut self, existing_titles: &HashSet<String>) {
        if !existing_titles.contains(self.as_title()) {
            return;
        }

        let mut counter = 2;
        loop {
            let new_title = format!("{} #{}", self.as_title(), counter);
            if !existing_titles.contains(&new_title) {
                self.title = new_title;
                return;
            }
            counter += 1;
        }
    }
}
impl fmt::Display for BenchmarkId {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        f.write_str(self.as_title())
    }
}
impl fmt::Debug for BenchmarkId {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        fn format_opt(opt: &Option<String>) -> String {
            match *opt {
                Some(ref string) => format!("\"{}\"", string),
                None => "None".to_owned(),
            }
        }

        write!(
            f,
            "BenchmarkId {{ group_id: \"{}\", function_id: {}, value_str: {}, throughput: {:?} }}",
            self.group_id,
            format_opt(&self.function_id),
            format_opt(&self.value_str),
            self.throughput,
        )
    }
}

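/// Run-wide settings shared by every report: where output files go, how plots
/// are generated and configured, and whether this is a test-only run.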
pub struct ReportContext {
    pub output_directory: String,
    pub plotting: Plotting,
    pub plot_config: PlotConfiguration,
    pub test_mode: bool,
}
impl ReportContext {
    pub fn report_path<P: AsRef<Path> + ?Sized>(&self, id: &BenchmarkId, file_name: &P) -> PathBuf {
        let mut path = PathBuf::from(format!(
            "{}/{}/report",
            self.output_directory,
            id.as_directory_name()
        ));
        path.push(file_name);
        path
    }
}

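/// Hooks invoked as a benchmark progresses. Every method has an empty default
/// body, so implementors only override the events they care about.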
pub(crate) trait Report {
    fn benchmark_start(&self, _id: &BenchmarkId, _context: &ReportContext) {}
    fn profile(&self, _id: &BenchmarkId, _context: &ReportContext, _profile_ns: f64) {}
    fn warmup(&self, _id: &BenchmarkId, _context: &ReportContext, _warmup_ns: f64) {}
    fn terminated(&self, _id: &BenchmarkId, _context: &ReportContext) {}
    fn analysis(&self, _id: &BenchmarkId, _context: &ReportContext) {}
    fn measurement_start(
        &self,
        _id: &BenchmarkId,
        _context: &ReportContext,
        _sample_count: u64,
        _estimate_ns: f64,
        _iter_count: u64,
    ) {
    }
    fn measurement_complete(
        &self,
        _id: &BenchmarkId,
        _context: &ReportContext,
        _measurements: &MeasurementData,
    ) {
    }
    fn summarize(&self, _context: &ReportContext, _all_ids: &[BenchmarkId]) {}
    fn final_summary(&self, _context: &ReportContext) {}
}

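/// A `Report` that fans every event out to a list of child reports.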
pub(crate) struct Reports {
    reports: Vec<Box<Report>>,
}
impl Reports {
    pub fn new(reports: Vec<Box<Report>>) -> Reports {
        Reports { reports }
    }
}
impl Report for Reports {
    fn benchmark_start(&self, id: &BenchmarkId, context: &ReportContext) {
        for report in &self.reports {
            report.benchmark_start(id, context);
        }
    }

    fn profile(&self, id: &BenchmarkId, context: &ReportContext, profile_ns: f64) {
        for report in &self.reports {
            report.profile(id, context, profile_ns);
        }
    }

    fn warmup(&self, id: &BenchmarkId, context: &ReportContext, warmup_ns: f64) {
        for report in &self.reports {
            report.warmup(id, context, warmup_ns);
        }
    }

    fn terminated(&self, id: &BenchmarkId, context: &ReportContext) {
        for report in &self.reports {
            report.terminated(id, context);
        }
    }

    fn analysis(&self, id: &BenchmarkId, context: &ReportContext) {
        for report in &self.reports {
            report.analysis(id, context);
        }
    }

    fn measurement_start(
        &self,
        id: &BenchmarkId,
        context: &ReportContext,
        sample_count: u64,
        estimate_ns: f64,
        iter_count: u64,
    ) {
        for report in &self.reports {
            report.measurement_start(id, context, sample_count, estimate_ns, iter_count);
        }
    }

    fn measurement_complete(
        &self,
        id: &BenchmarkId,
        context: &ReportContext,
        measurements: &MeasurementData,
    ) {
        for report in &self.reports {
            report.measurement_complete(id, context, measurements);
        }
    }

    fn summarize(&self, context: &ReportContext, all_ids: &[BenchmarkId]) {
        for report in &self.reports {
            report.summarize(context, all_ids);
        }
    }

    fn final_summary(&self, context: &ReportContext) {
        for report in &self.reports {
            report.final_summary(context);
        }
    }
}

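/// Report that prints progress and results to the terminal, optionally
/// overwriting the current line in place and coloring output with ANSI
/// escape codes.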
pub(crate) struct CliReport {
    pub enable_text_overwrite: bool,
    pub enable_text_coloring: bool,
    pub verbose: bool,

    last_line_len: Cell<usize>,
}
impl CliReport {
    pub fn new(
        enable_text_overwrite: bool,
        enable_text_coloring: bool,
        verbose: bool,
    ) -> CliReport {
        CliReport {
            enable_text_overwrite,
            enable_text_coloring,
            verbose,

            last_line_len: Cell::new(0),
        }
    }

    fn text_overwrite(&self) {
        if self.enable_text_overwrite {
            print!("\r");
            for _ in 0..self.last_line_len.get() {
                print!(" ");
            }
            print!("\r");
        }
    }

    #[cfg_attr(feature = "cargo-clippy", allow(clippy::needless_pass_by_value))]
    fn print_overwritable(&self, s: String) {
        if self.enable_text_overwrite {
            self.last_line_len.set(s.len());
            print!("{}", s);
            stdout().flush().unwrap();
        } else {
            println!("{}", s);
        }
    }

    fn green(&self, s: String) -> String {
        if self.enable_text_coloring {
            format!("\x1B[32m{}\x1B[39m", s)
        } else {
            s
        }
    }

    fn yellow(&self, s: String) -> String {
        if self.enable_text_coloring {
            format!("\x1B[33m{}\x1B[39m", s)
        } else {
            s
        }
    }

    fn red(&self, s: String) -> String {
        if self.enable_text_coloring {
            format!("\x1B[31m{}\x1B[39m", s)
        } else {
            s
        }
    }

    fn bold(&self, s: String) -> String {
        if self.enable_text_coloring {
            format!("\x1B[1m{}\x1B[22m", s)
        } else {
            s
        }
    }

    fn faint(&self, s: String) -> String {
        if self.enable_text_coloring {
            format!("\x1B[2m{}\x1B[22m", s)
        } else {
            s
        }
    }

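    /// Prints a summary of the Tukey outlier classification for the given
    /// sample: how many measurements were mild or severe outliers on the low
    /// and high side.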
    pub fn outliers(&self, sample: &LabeledSample<f64>) {
        let (los, lom, _, him, his) = sample.count();
        let noutliers = los + lom + him + his;
        let sample_size = sample.len();

        if noutliers == 0 {
            return;
        }

        let percent = |n: usize| 100. * n as f64 / sample_size as f64;

        println!(
            "{}",
            self.yellow(format!(
                "Found {} outliers among {} measurements ({:.2}%)",
                noutliers,
                sample_size,
                percent(noutliers)
            ))
        );

        let print = |n, label| {
            if n != 0 {
                println!(" {} ({:.2}%) {}", n, percent(n), label);
            }
        };

        print(los, "low severe");
        print(lom, "low mild");
        print(him, "high mild");
        print(his, "high severe");
    }
}
impl Report for CliReport {
    fn benchmark_start(&self, id: &BenchmarkId, ctx: &ReportContext) {
        if ctx.test_mode {
            println!("Testing {}", id);
        } else {
            self.print_overwritable(format!("Benchmarking {}", id));
        }
    }

    fn profile(&self, id: &BenchmarkId, _: &ReportContext, profile_ns: f64) {
        self.text_overwrite();
        self.print_overwritable(format!(
            "Benchmarking {}: Profiling for {}",
            id,
            format::time(profile_ns)
        ));
    }

    fn warmup(&self, id: &BenchmarkId, _: &ReportContext, warmup_ns: f64) {
        self.text_overwrite();
        self.print_overwritable(format!(
            "Benchmarking {}: Warming up for {}",
            id,
            format::time(warmup_ns)
        ));
    }

    fn terminated(&self, id: &BenchmarkId, ctx: &ReportContext) {
        if ctx.test_mode {
            println!("Success");
        } else {
            self.text_overwrite();
            println!("Benchmarking {}: Complete (Analysis Disabled)", id);
        }
    }

    fn analysis(&self, id: &BenchmarkId, _: &ReportContext) {
        self.text_overwrite();
        self.print_overwritable(format!("Benchmarking {}: Analyzing", id));
    }

    fn measurement_start(
        &self,
        id: &BenchmarkId,
        _: &ReportContext,
        sample_count: u64,
        estimate_ns: f64,
        iter_count: u64,
    ) {
        self.text_overwrite();
        let iter_string = if self.verbose {
            format!("{} iterations", iter_count)
        } else {
            format::iter_count(iter_count)
        };

        self.print_overwritable(format!(
            "Benchmarking {}: Collecting {} samples in estimated {} ({})",
            id,
            sample_count,
            format::time(estimate_ns),
            iter_string
        ));
    }

    fn measurement_complete(&self, id: &BenchmarkId, _: &ReportContext, meas: &MeasurementData) {
        self.text_overwrite();

        let slope_estimate = meas.absolute_estimates[&Statistic::Slope];

        {
            let mut id = id.as_title().to_owned();

            if id.len() > 23 {
                println!("{}", self.green(id.clone()));
                id.clear();
            }
            let id_len = id.len();

            println!(
                "{}{}time: [{} {} {}]",
                self.green(id),
                " ".repeat(24 - id_len),
                self.faint(format::time(slope_estimate.confidence_interval.lower_bound)),
                self.bold(format::time(slope_estimate.point_estimate)),
                self.faint(format::time(slope_estimate.confidence_interval.upper_bound))
            );
        }

        if let Some(ref throughput) = meas.throughput {
            println!(
                "{}thrpt: [{} {} {}]",
                " ".repeat(24),
                self.faint(format::throughput(
                    throughput,
                    slope_estimate.confidence_interval.upper_bound
                )),
                self.bold(format::throughput(
                    throughput,
                    slope_estimate.point_estimate
                )),
                self.faint(format::throughput(
                    throughput,
                    slope_estimate.confidence_interval.lower_bound
                )),
            )
        }

        if let Some(ref comp) = meas.comparison {
            let different_mean = comp.p_value < comp.significance_threshold;
            let mean_est = comp.relative_estimates[&Statistic::Mean];
            let point_estimate = mean_est.point_estimate;
            let mut point_estimate_str = format::change(point_estimate, true);
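            // A relative change in time maps to the inverse relative change in
            // throughput: e.g. a ratio of -0.5 (50% less time) becomes +1.0
            // (100% more throughput).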
            let to_thrpt_estimate = |ratio: f64| 1.0 / (1.0 + ratio) - 1.0;
            let mut thrpt_point_estimate_str =
                format::change(to_thrpt_estimate(point_estimate), true);
            let explanation_str: String;

            if !different_mean {
                explanation_str = "No change in performance detected.".to_owned();
            } else {
                let comparison = compare_to_threshold(&mean_est, comp.noise_threshold);
                match comparison {
                    ComparisonResult::Improved => {
                        point_estimate_str = self.green(self.bold(point_estimate_str));
                        thrpt_point_estimate_str = self.green(self.bold(thrpt_point_estimate_str));
                        explanation_str =
                            format!("Performance has {}.", self.green("improved".to_owned()));
                    }
                    ComparisonResult::Regressed => {
                        point_estimate_str = self.red(self.bold(point_estimate_str));
                        thrpt_point_estimate_str = self.red(self.bold(thrpt_point_estimate_str));
                        explanation_str =
                            format!("Performance has {}.", self.red("regressed".to_owned()));
                    }
                    ComparisonResult::NonSignificant => {
                        explanation_str = "Change within noise threshold.".to_owned();
                    }
                }
            }

            if meas.throughput.is_some() {
                println!("{}change:", " ".repeat(17));

                println!(
                    "{}time: [{} {} {}] (p = {:.2} {} {:.2})",
                    " ".repeat(24),
                    self.faint(format::change(
                        mean_est.confidence_interval.lower_bound,
                        true
                    )),
                    point_estimate_str,
                    self.faint(format::change(
                        mean_est.confidence_interval.upper_bound,
                        true
                    )),
                    comp.p_value,
                    if different_mean { "<" } else { ">" },
                    comp.significance_threshold
                );
                println!(
                    "{}thrpt: [{} {} {}]",
                    " ".repeat(24),
                    self.faint(format::change(
                        to_thrpt_estimate(mean_est.confidence_interval.upper_bound),
                        true
                    )),
                    thrpt_point_estimate_str,
                    self.faint(format::change(
                        to_thrpt_estimate(mean_est.confidence_interval.lower_bound),
                        true
                    )),
                );
            } else {
                println!(
                    "{}change: [{} {} {}] (p = {:.2} {} {:.2})",
                    " ".repeat(24),
                    self.faint(format::change(
                        mean_est.confidence_interval.lower_bound,
                        true
                    )),
                    point_estimate_str,
                    self.faint(format::change(
                        mean_est.confidence_interval.upper_bound,
                        true
                    )),
                    comp.p_value,
                    if different_mean { "<" } else { ">" },
                    comp.significance_threshold
                );
            }

            println!("{}{}", " ".repeat(24), explanation_str);
        }

        self.outliers(&meas.avg_times);

        if self.verbose {
            fn format_short_estimate(estimate: &Estimate) -> String {
                format!(
                    "[{} {}]",
                    format::time(estimate.confidence_interval.lower_bound),
                    format::time(estimate.confidence_interval.upper_bound)
                )
            }

            let data = &meas.data;
            let slope_estimate = &meas.absolute_estimates[&Statistic::Slope];

            println!(
                "{:<7}{} {:<15}[{:0.7} {:0.7}]",
                "slope",
                format_short_estimate(slope_estimate),
                "R^2",
                Slope(slope_estimate.confidence_interval.lower_bound).r_squared(data),
                Slope(slope_estimate.confidence_interval.upper_bound).r_squared(data),
            );
            println!(
                "{:<7}{} {:<15}{}",
                "mean",
                format_short_estimate(&meas.absolute_estimates[&Statistic::Mean]),
                "std. dev.",
                format_short_estimate(&meas.absolute_estimates[&Statistic::StdDev]),
            );
            println!(
                "{:<7}{} {:<15}{}",
                "median",
                format_short_estimate(&meas.absolute_estimates[&Statistic::Median]),
                "med. abs. dev.",
                format_short_estimate(&meas.absolute_estimates[&Statistic::MedianAbsDev]),
            );
        }
    }
}

enum ComparisonResult {
    Improved,
    Regressed,
    NonSignificant,
}

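/// Classifies a relative change as improved or regressed only when its entire
/// confidence interval lies outside the noise threshold; otherwise the change
/// is treated as not significant.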
fn compare_to_threshold(estimate: &Estimate, noise: f64) -> ComparisonResult {
    let ci = estimate.confidence_interval;
    let lb = ci.lower_bound;
    let ub = ci.upper_bound;

    if lb < -noise && ub < -noise {
        ComparisonResult::Improved
    } else if lb > noise && ub > noise {
        ComparisonResult::Regressed
    } else {
        ComparisonResult::NonSignificant
    }
}

#[cfg(test)]
mod test {
    use super::*;

    #[test]
    fn test_make_filename_safe_replaces_characters() {
        let input = "?/\\*\"";
        let safe = make_filename_safe(input);
        assert_eq!("_____", &safe);
    }

    #[test]
    fn test_make_filename_safe_truncates_long_strings() {
        let input = "this is a very long string. it is too long to be safe as a directory name, and so it needs to be truncated. what a long string this is.";
        let safe = make_filename_safe(input);
        assert!(input.len() > MAX_DIRECTORY_NAME_LEN);
        assert_eq!(&input[0..MAX_DIRECTORY_NAME_LEN], &safe);
    }

    #[test]
    fn test_make_filename_safe_respects_character_boundaries() {
        let input = "✓✓✓✓✓✓✓✓✓✓✓✓✓✓✓✓✓✓✓✓✓✓✓✓✓✓✓✓✓✓✓✓✓✓";
        let safe = make_filename_safe(input);
        assert!(safe.len() < MAX_DIRECTORY_NAME_LEN);
    }

    #[test]
    fn test_benchmark_id_make_directory_name_unique() {
        let existing_id = BenchmarkId::new(
            "group".to_owned(),
            Some("function".to_owned()),
            Some("value".to_owned()),
            None,
        );
        let mut directories = HashSet::new();
        directories.insert(existing_id.as_directory_name().to_owned());

        let mut new_id = existing_id.clone();
        new_id.ensure_directory_name_unique(&directories);
        assert_eq!("group/function/value_2", new_id.as_directory_name());
        directories.insert(new_id.as_directory_name().to_owned());

        new_id = existing_id.clone();
        new_id.ensure_directory_name_unique(&directories);
        assert_eq!("group/function/value_3", new_id.as_directory_name());
        directories.insert(new_id.as_directory_name().to_owned());
    }
}