criterion/lib.rs

//! A statistics-driven micro-benchmarking library written in Rust.
//!
//! This crate is a microbenchmarking library which aims to provide strong
//! statistical confidence in detecting and estimating the size of performance
//! improvements and regressions, while also being easy to use.
//!
//! See
//! [the user guide](https://bheisler.github.io/criterion.rs/book/index.html)
//! for examples as well as details on the measurement and analysis process,
//! and the output.
//!
//! ## Features:
//! * Benchmark Rust code as well as external programs
//! * Collects detailed statistics, providing strong confidence that changes
//!   to performance are real, not measurement noise
//! * Produces detailed charts, providing thorough understanding of your code's
//!   performance behavior.
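//!
//! A minimal benchmark might look like the sketch below (the `fibonacci`
//! function and the benchmark names are illustrative only):
//!
//! ```rust,no_run
//! #[macro_use]
//! extern crate criterion;
//!
//! use criterion::{black_box, Criterion};
//!
//! fn fibonacci(n: u64) -> u64 {
//!     match n {
//!         0 | 1 => 1,
//!         n => fibonacci(n - 1) + fibonacci(n - 2),
//!     }
//! }
//!
//! fn bench_fib(c: &mut Criterion) {
//!     c.bench_function("fib 20", |b| b.iter(|| fibonacci(black_box(20))));
//! }
//!
//! criterion_group!(benches, bench_fib);
//! criterion_main!(benches);
//! ```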

#![deny(missing_docs)]
#![cfg_attr(feature = "real_blackbox", feature(test))]
#![cfg_attr(not(feature = "html_reports"), allow(dead_code))]
#![cfg_attr(
    feature = "cargo-clippy",
    allow(
        clippy::used_underscore_binding,
        clippy::just_underscores_and_digits,
        clippy::transmute_ptr_to_ptr
    )
)]

#[cfg(test)]
#[macro_use]
extern crate approx;

#[cfg(test)]
#[macro_use]
extern crate quickcheck;

#[cfg(test)]
extern crate rand;

#[macro_use]
extern crate clap;

#[macro_use]
extern crate lazy_static;

extern crate atty;
extern crate cast;
extern crate csv;
extern crate itertools;
extern crate num_traits;
extern crate rand_core;
extern crate rand_os;
extern crate rand_xoshiro;
extern crate rayon;
extern crate serde;
extern crate serde_json;
extern crate walkdir;

#[cfg(feature = "html_reports")]
extern crate criterion_plot;

#[cfg(feature = "html_reports")]
extern crate tinytemplate;

#[cfg(feature = "real_blackbox")]
extern crate test;

#[macro_use]
extern crate serde_derive;

// Needs to be declared before other modules
// in order to be usable there.
#[macro_use]
mod macros_private;
#[macro_use]
mod analysis;
mod benchmark;
mod csv_report;
mod error;
mod estimate;
mod format;
mod fs;
mod macros;
mod program;
mod report;
mod routine;
mod stats;

#[cfg(feature = "html_reports")]
mod kde;

#[cfg(feature = "html_reports")]
mod plot;

#[cfg(feature = "html_reports")]
mod html;

use std::cell::RefCell;
use std::collections::BTreeMap;
use std::default::Default;
use std::fmt;
use std::iter::IntoIterator;
use std::process::Command;
use std::time::{Duration, Instant};

use benchmark::BenchmarkConfig;
use benchmark::NamedRoutine;
use csv_report::FileCsvReport;
use estimate::{Distributions, Estimates, Statistic};
use plotting::Plotting;
use report::{CliReport, Report, ReportContext, Reports};
use routine::Function;

#[cfg(feature = "html_reports")]
use html::Html;

pub use benchmark::{Benchmark, BenchmarkDefinition, ParameterizedBenchmark};

lazy_static! {
    static ref DEBUG_ENABLED: bool = { std::env::vars().any(|(key, _)| key == "CRITERION_DEBUG") };
}

fn debug_enabled() -> bool {
    *DEBUG_ENABLED
}

// Fake function which shows a deprecation warning when compiled without the html_reports feature.
#[cfg(not(feature = "html_reports"))]
#[cfg_attr(not(feature = "html_reports"), doc(hidden))]
pub fn deprecation_warning() {
    #[deprecated(
        since = "0.2.6",
        note = "The html_reports cargo feature is deprecated. As of 0.3.0, HTML reports will no longer be optional."
    )]
    fn deprecation_warning_inner() {}

    deprecation_warning_inner()
}

/// A function that is opaque to the optimizer, used to prevent the compiler from
/// optimizing away computations in a benchmark.
///
/// This variant is backed by the (unstable) test::black_box function.
#[cfg(feature = "real_blackbox")]
pub fn black_box<T>(dummy: T) -> T {
    test::black_box(dummy)
}

/// A function that is opaque to the optimizer, used to prevent the compiler from
/// optimizing away computations in a benchmark.
///
/// This variant is stable-compatible, but it may cause some performance overhead
/// or fail to prevent code from being eliminated.
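///
/// # Example
///
/// A minimal sketch of shielding an input value from constant folding (the
/// helper function below is illustrative only):
///
/// ```rust
/// use criterion::black_box;
///
/// fn sum_to(n: u64) -> u64 {
///     (0..n).sum()
/// }
///
/// // Without `black_box`, the compiler could evaluate `sum_to(1000)` at compile time.
/// let result = sum_to(black_box(1000));
/// assert_eq!(result, 499_500);
/// ```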
#[cfg(not(feature = "real_blackbox"))]
pub fn black_box<T>(dummy: T) -> T {
    unsafe {
        let ret = std::ptr::read_volatile(&dummy);
        std::mem::forget(dummy);
        ret
    }
}

/// Represents a function to benchmark, together with a name for that function.
/// Used together with `bench_functions` to represent one out of multiple functions
/// under benchmark.
pub struct Fun<I: fmt::Debug> {
    f: NamedRoutine<I>,
}

impl<I> Fun<I>
where
    I: fmt::Debug + 'static,
{
    /// Create a new `Fun` given a name and a closure.
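    ///
    /// # Example
    ///
    /// A sketch of wrapping a routine (the routine shown is illustrative):
    ///
    /// ```rust
    /// use criterion::{Bencher, Fun};
    ///
    /// fn bench_double(b: &mut Bencher, i: &u64) {
    ///     b.iter(|| *i * 2);
    /// }
    ///
    /// let double = Fun::new("double", bench_double);
    /// ```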
    pub fn new<F>(name: &str, f: F) -> Fun<I>
    where
        F: FnMut(&mut Bencher, &I) + 'static,
    {
        let routine = NamedRoutine {
            id: name.to_owned(),
            f: Box::new(RefCell::new(Function::new(f))),
        };

        Fun { f: routine }
    }
}

/// Argument to [`Bencher::iter_batched`](struct.Bencher.html#method.iter_batched) and
/// [`Bencher::iter_batched_ref`](struct.Bencher.html#method.iter_batched_ref) which controls the
/// batch size.
///
/// Generally speaking, almost all benchmarks should use `SmallInput`. If the input or the result
/// of the benchmark routine is large enough that `SmallInput` causes out-of-memory errors,
/// `LargeInput` can be used to reduce memory usage at the cost of increasing the measurement
/// overhead. If the input or the result is extremely large (or if it holds some
/// limited external resource like a file handle), `PerIteration` will set the number of iterations
/// per batch to exactly one. `PerIteration` can increase the measurement overhead substantially
/// and should be avoided wherever possible.
///
/// Each value lists an estimate of the measurement overhead. This is intended as a rough guide
/// to assist in choosing an option; it should not be relied upon. In particular, it is not valid
/// to subtract the listed overhead from the measurement and assume that the result represents the
/// true runtime of a function. The actual measurement overhead for your specific benchmark depends
/// on the details of the function you're benchmarking and the hardware and operating
/// system running the benchmark.
///
/// With that said, if the runtime of your function is small relative to the measurement overhead,
/// it will be difficult to take accurate measurements. In this situation, the best option is to use
/// [`Bencher::iter`](struct.Bencher.html#method.iter), which has next-to-zero measurement
/// overhead.
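///
/// # Example
///
/// A sketch of selecting a batch size for a routine that consumes its input (the
/// setup closure and summing routine are illustrative):
///
/// ```rust
/// #[macro_use] extern crate criterion;
///
/// use criterion::*;
///
/// fn bench(c: &mut Criterion) {
///     c.bench_function("sum", |b| {
///         // The input is small, so SmallInput keeps the measurement overhead low.
///         b.iter_batched(
///             || vec![1u64; 1024],
///             |data| data.iter().sum::<u64>(),
///             BatchSize::SmallInput,
///         )
///     });
/// }
///
/// criterion_group!(benches, bench);
/// criterion_main!(benches);
/// ```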
#[derive(Debug, Eq, PartialEq, Copy, Hash, Clone)]
pub enum BatchSize {
    /// `SmallInput` indicates that the input to the benchmark routine (the value returned from
    /// the setup routine) is small enough that millions of values can be safely held in memory.
    /// Always prefer `SmallInput` unless the benchmark is using too much memory.
    ///
    /// In testing, the maximum measurement overhead from benchmarking with `SmallInput` is on the
    /// order of 500 picoseconds. This is presented as a rough guide; your results may vary.
    SmallInput,

    /// `LargeInput` indicates that the input to the benchmark routine or the value returned from
    /// that routine is large. This will reduce the memory usage but increase the measurement
    /// overhead.
    ///
    /// In testing, the maximum measurement overhead from benchmarking with `LargeInput` is on the
    /// order of 750 picoseconds. This is presented as a rough guide; your results may vary.
    LargeInput,

    /// `PerIteration` indicates that the input to the benchmark routine or the value returned from
    /// that routine is extremely large or holds some limited resource, such that holding many values
    /// in memory at once is infeasible. This provides the worst measurement overhead, but the
    /// lowest memory usage.
    ///
    /// In testing, the maximum measurement overhead from benchmarking with `PerIteration` is on the
    /// order of 350 nanoseconds or 350,000 picoseconds. This is presented as a rough guide; your
    /// results may vary.
    PerIteration,

    /// `NumBatches` will attempt to divide the iterations up into a given number of batches.
    /// A larger number of batches (and thus smaller batches) will reduce memory usage but increase
    /// measurement overhead. This allows the user to choose their own tradeoff between memory usage
    /// and measurement overhead, but care must be taken in tuning the number of batches. Most
    /// benchmarks should use `SmallInput` or `LargeInput` instead.
    NumBatches(u64),

    /// `NumIterations` fixes the batch size to a constant number, specified by the user. This
    /// allows the user to choose their own tradeoff between overhead and memory usage, but care must
    /// be taken in tuning the batch size. In general, the measurement overhead of `NumIterations`
    /// will be larger than that of `NumBatches`. Most benchmarks should use `SmallInput` or
    /// `LargeInput` instead.
    NumIterations(u64),

    #[doc(hidden)]
    __NonExhaustive,
}
impl BatchSize {
    /// Convert to a number of iterations per batch.
    ///
    /// We try to do a constant number of batches regardless of the number of iterations in this
    /// sample. If the measurement overhead is roughly constant regardless of the number of
    /// iterations the analysis of the results later will have an easier time separating the
    /// measurement overhead from the benchmark time.
    fn iters_per_batch(self, iters: u64) -> u64 {
        match self {
            BatchSize::SmallInput => (iters + 10 - 1) / 10,
            BatchSize::LargeInput => (iters + 1000 - 1) / 1000,
            BatchSize::PerIteration => 1,
            BatchSize::NumBatches(batches) => (iters + batches - 1) / batches,
            BatchSize::NumIterations(size) => size,
            BatchSize::__NonExhaustive => panic!("__NonExhaustive is not a valid BatchSize."),
        }
    }
}

/// Timer struct to iterate a benchmarked function and measure the runtime.
///
/// This struct provides different timing loops as methods. Each timing loop provides a different
/// way to time a routine and each has advantages and disadvantages.
///
/// * If your routine requires no per-iteration setup and returns a value with an expensive `drop`
///   method, use `iter_with_large_drop`.
/// * If your routine requires some per-iteration setup that shouldn't be timed, use `iter_batched`
///   or `iter_batched_ref`. See [`BatchSize`](enum.BatchSize.html) for a discussion of batch sizes.
///   If the setup value implements `Drop` and you don't want to include the `drop` time in the
///   measurement, use `iter_batched_ref`, otherwise use `iter_batched`. These methods are also
///   suitable for benchmarking routines which return a value with an expensive `drop` method,
///   but are more complex than `iter_with_large_drop`.
/// * Otherwise, use `iter`.
#[derive(Clone, Copy)]
pub struct Bencher {
    iterated: bool,
    iters: u64,
    elapsed: Duration,
}
impl Bencher {
    /// Times a `routine` by executing it many times and timing the total elapsed time.
    ///
    /// Prefer this timing loop when `routine` returns a value that doesn't have a destructor.
    ///
    /// # Timing model
    ///
    /// Note that the `Bencher` also measures the time required to destroy the output of `routine()`.
    /// Therefore prefer this timing loop when the runtime of `mem::drop(O)` is negligible compared
    /// to the runtime of the `routine`.
    ///
    /// ```text
    /// elapsed = Instant::now + iters * (routine + mem::drop(O) + Range::next)
    /// ```
    ///
    /// # Example
    ///
    /// ```rust
    /// #[macro_use] extern crate criterion;
    ///
    /// use criterion::*;
    ///
    /// // The function to benchmark
    /// fn foo() {
    ///     // ...
    /// }
    ///
    /// fn bench(c: &mut Criterion) {
    ///     c.bench_function("iter", move |b| {
    ///         b.iter(|| foo())
    ///     });
    /// }
    ///
    /// criterion_group!(benches, bench);
    /// criterion_main!(benches);
    /// ```
    ///
    #[inline(never)]
    pub fn iter<O, R>(&mut self, mut routine: R)
    where
        R: FnMut() -> O,
    {
        self.iterated = true;
        let start = Instant::now();
        for _ in 0..self.iters {
            black_box(routine());
        }
        self.elapsed = start.elapsed();
    }

    #[doc(hidden)]
    pub fn iter_with_setup<I, O, S, R>(&mut self, setup: S, routine: R)
    where
        S: FnMut() -> I,
        R: FnMut(I) -> O,
    {
        self.iter_batched(setup, routine, BatchSize::PerIteration);
    }

    /// Times a `routine` by collecting its output on each iteration. This avoids timing the
    /// destructor of the value returned by `routine`.
    ///
    /// WARNING: This requires `O(iters * mem::size_of::<O>())` of memory, and `iters` is not under the
    /// control of the caller. If this causes out-of-memory errors, use `iter_batched` instead.
    ///
    /// # Timing model
    ///
    /// ```text
    /// elapsed = Instant::now + iters * (routine) + Iterator::collect::<Vec<_>>
    /// ```
    ///
    /// # Example
    ///
    /// ```rust
    /// #[macro_use] extern crate criterion;
    ///
    /// use criterion::*;
    ///
    /// fn create_vector() -> Vec<u64> {
    ///     # vec![]
    ///     // ...
    /// }
    ///
    /// fn bench(c: &mut Criterion) {
    ///     c.bench_function("with_drop", move |b| {
    ///         // This will avoid timing the Vec::drop.
    ///         b.iter_with_large_drop(|| create_vector())
    ///     });
    /// }
    ///
    /// criterion_group!(benches, bench);
    /// criterion_main!(benches);
    /// ```
    ///
    #[doc(hidden)]
    pub fn iter_with_large_drop<O, R>(&mut self, mut routine: R)
    where
        R: FnMut() -> O,
    {
        self.iter_batched(|| (), |_| routine(), BatchSize::SmallInput);
    }

    #[doc(hidden)]
    pub fn iter_with_large_setup<I, O, S, R>(&mut self, setup: S, routine: R)
    where
        S: FnMut() -> I,
        R: FnMut(I) -> O,
    {
        self.iter_batched(setup, routine, BatchSize::NumBatches(1));
    }

    /// Times a `routine` that requires some input by generating a batch of input, then timing the
    /// iteration of the benchmark over the input. See [`BatchSize`](enum.BatchSize.html) for
    /// details on choosing the batch size. Use this when the routine must consume its input.
    ///
    /// For example, use this loop to benchmark sorting algorithms, because they require unsorted
    /// data on each iteration.
    ///
    /// # Timing model
    ///
    /// ```text
    /// elapsed = (Instant::now * num_batches) + (iters * (routine + O::drop)) + Vec::extend
    /// ```
    ///
    /// # Example
    ///
    /// ```rust
    /// #[macro_use] extern crate criterion;
    ///
    /// use criterion::*;
    ///
    /// fn create_scrambled_data() -> Vec<u64> {
    ///     # vec![]
    ///     // ...
    /// }
    ///
    /// // The sorting algorithm to test
    /// fn sort(data: &mut [u64]) {
    ///     // ...
    /// }
    ///
    /// fn bench(c: &mut Criterion) {
    ///     let data = create_scrambled_data();
    ///
    ///     c.bench_function("with_setup", move |b| {
    ///         // This will avoid timing the clone call.
    ///         b.iter_batched(|| data.clone(), |mut data| sort(&mut data), BatchSize::SmallInput)
    ///     });
    /// }
    ///
    /// criterion_group!(benches, bench);
    /// criterion_main!(benches);
    /// ```
    ///
    #[inline(never)]
    pub fn iter_batched<I, O, S, R>(&mut self, mut setup: S, mut routine: R, size: BatchSize)
    where
        S: FnMut() -> I,
        R: FnMut(I) -> O,
    {
        self.iterated = true;
        let batch_size = size.iters_per_batch(self.iters);
        assert!(batch_size != 0, "Batch size must not be zero.");
        self.elapsed = Duration::from_secs(0);

        if batch_size == 1 {
            for _ in 0..self.iters {
                let input = black_box(setup());

                let start = Instant::now();
                let output = routine(input);
                self.elapsed += start.elapsed();

                drop(black_box(output));
            }
        } else {
            let mut iteration_counter = 0;

            while iteration_counter < self.iters {
                let batch_size = ::std::cmp::min(batch_size, self.iters - iteration_counter);

                let inputs = black_box((0..batch_size).map(|_| setup()).collect::<Vec<_>>());
                let mut outputs = Vec::with_capacity(batch_size as usize);

                let start = Instant::now();
                outputs.extend(inputs.into_iter().map(&mut routine));
                self.elapsed += start.elapsed();

                black_box(outputs);

                iteration_counter += batch_size;
            }
        }
    }

    /// Times a `routine` that requires some input by generating a batch of input, then timing the
    /// iteration of the benchmark over the input. See [`BatchSize`](enum.BatchSize.html) for
    /// details on choosing the batch size. Use this when the routine should accept the input by
    /// mutable reference.
    ///
    /// For example, use this loop to benchmark sorting algorithms, because they require unsorted
    /// data on each iteration.
    ///
    /// # Timing model
    ///
    /// ```text
    /// elapsed = (Instant::now * num_batches) + (iters * routine) + Vec::extend
    /// ```
    ///
    /// # Example
    ///
    /// ```rust
    /// #[macro_use] extern crate criterion;
    ///
    /// use criterion::*;
    ///
    /// fn create_scrambled_data() -> Vec<u64> {
    ///     # vec![]
    ///     // ...
    /// }
    ///
    /// // The sorting algorithm to test
    /// fn sort(data: &mut [u64]) {
    ///     // ...
    /// }
    ///
    /// fn bench(c: &mut Criterion) {
    ///     let data = create_scrambled_data();
    ///
    ///     c.bench_function("with_setup", move |b| {
    ///         // This will avoid timing the clone call.
    ///         b.iter_batched_ref(|| data.clone(), |data| sort(data), BatchSize::SmallInput)
    ///     });
    /// }
    ///
    /// criterion_group!(benches, bench);
    /// criterion_main!(benches);
    /// ```
    ///
    #[inline(never)]
    pub fn iter_batched_ref<I, O, S, R>(&mut self, mut setup: S, mut routine: R, size: BatchSize)
    where
        S: FnMut() -> I,
        R: FnMut(&mut I) -> O,
    {
        self.iterated = true;
        let batch_size = size.iters_per_batch(self.iters);
        assert!(batch_size != 0, "Batch size must not be zero.");
        self.elapsed = Duration::from_secs(0);

        if batch_size == 1 {
            for _ in 0..self.iters {
                let mut input = black_box(setup());

                let start = Instant::now();
                let output = routine(&mut input);
                self.elapsed += start.elapsed();

                drop(black_box(output));
                drop(black_box(input));
            }
        } else {
            let mut iteration_counter = 0;

            while iteration_counter < self.iters {
                let batch_size = ::std::cmp::min(batch_size, self.iters - iteration_counter);

                let mut inputs = black_box((0..batch_size).map(|_| setup()).collect::<Vec<_>>());
                let mut outputs = Vec::with_capacity(batch_size as usize);

                let start = Instant::now();
                outputs.extend(inputs.iter_mut().map(&mut routine));
                self.elapsed += start.elapsed();

                black_box(outputs);

                iteration_counter += batch_size;
            }
        }
    }

    // Benchmarks must actually call one of the iter methods. This causes benchmarks to fail loudly
    // if they don't.
    fn assert_iterated(&mut self) {
        if !self.iterated {
            panic!("Benchmark function must call Bencher::iter or related method.");
        }
        self.iterated = false;
    }
}

/// Baseline describes how the `baseline_directory` is handled.
pub enum Baseline {
    /// Compare ensures a previous saved version of the baseline
    /// exists and runs comparison against that.
    Compare,
    /// Save writes the benchmark results to the baseline directory,
    /// overwriting any results that were previously there.
    Save,
}

/// The benchmark manager
///
/// `Criterion` lets you configure and execute benchmarks.
///
/// Each benchmark consists of four phases:
///
/// - **Warm-up**: The routine is repeatedly executed, to let the CPU/OS/JIT/interpreter adapt to
///   the new load
/// - **Measurement**: The routine is repeatedly executed, and timing information is collected into
///   a sample
/// - **Analysis**: The sample is analyzed and distilled into meaningful statistics that get
///   reported to stdout, stored in files, and plotted
/// - **Comparison**: The current sample is compared with the sample obtained in the previous
///   benchmark.
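///
/// # Example
///
/// A sketch of overriding a few of the defaults (the values shown are illustrative):
///
/// ```rust
/// use std::time::Duration;
///
/// use criterion::Criterion;
///
/// let criterion = Criterion::default()
///     .sample_size(50)
///     .measurement_time(Duration::from_secs(10))
///     .noise_threshold(0.05);
/// ```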
pub struct Criterion {
    config: BenchmarkConfig,
    plotting: Plotting,
    filter: Option<String>,
    report: Box<Report>,
    output_directory: String,
    baseline_directory: String,
    baseline: Baseline,
    profile_time: Option<Duration>,
    test_mode: bool,
    list_mode: bool,
}

impl Default for Criterion {
    /// Creates a benchmark manager with the following default settings:
    ///
    /// - Sample size: 100 measurements
    /// - Warm-up time: 3 s
    /// - Measurement time: 5 s
    /// - Bootstrap size: 100 000 resamples
    /// - Noise threshold: 0.01 (1%)
    /// - Confidence level: 0.95
    /// - Significance level: 0.05
    /// - Plotting: enabled (if gnuplot is available)
    /// - No filter
    fn default() -> Criterion {
        #[allow(unused_mut, unused_assignments)]
        let mut plotting = Plotting::Unset;

        let mut reports: Vec<Box<Report>> = vec![];
        reports.push(Box::new(CliReport::new(false, false, false)));
        reports.push(Box::new(FileCsvReport));

        let output_directory =
            match std::env::vars().find(|&(ref key, _)| key == "CARGO_TARGET_DIR") {
                Some((_, value)) => format!("{}/criterion", value),
                None => "target/criterion".to_owned(),
            };

        Criterion {
            config: BenchmarkConfig {
                confidence_level: 0.95,
                measurement_time: Duration::new(5, 0),
                noise_threshold: 0.01,
                nresamples: 100_000,
                sample_size: 100,
                significance_level: 0.05,
                warm_up_time: Duration::new(3, 0),
            },
            plotting,
            filter: None,
            report: Box::new(Reports::new(reports)),
            baseline_directory: "base".to_owned(),
            baseline: Baseline::Save,
            profile_time: None,
            test_mode: false,
            list_mode: false,
            output_directory,
        }
    }
}

impl Criterion {
    /// Changes the default size of the sample for benchmarks run with this runner.
    ///
    /// A bigger sample should yield more accurate results if paired with a sufficiently large
    /// measurement time.
    ///
    /// Sample size must be at least 2.
    ///
    /// # Panics
    ///
    /// Panics if set to zero or one.
    pub fn sample_size(mut self, n: usize) -> Criterion {
        assert!(n >= 2);
        if n < 10 {
            println!("Warning: Sample sizes < 10 will be disallowed in Criterion.rs 0.3.0.");
        }

        self.config.sample_size = n;
        self
    }

    /// Changes the default warm-up time for benchmarks run with this runner.
    ///
    /// # Panics
    ///
    /// Panics if the input duration is zero.
    pub fn warm_up_time(mut self, dur: Duration) -> Criterion {
        assert!(dur.to_nanos() > 0);

        self.config.warm_up_time = dur;
        self
    }

    /// Changes the default measurement time for benchmarks run with this runner.
    ///
    /// With a longer time, the measurement will become more resilient to transitory peak loads
    /// caused by external programs.
    ///
    /// **Note**: If the measurement time is too low, Criterion will automatically increase it.
    ///
    /// # Panics
    ///
    /// Panics if the input duration is zero.
    pub fn measurement_time(mut self, dur: Duration) -> Criterion {
        assert!(dur.to_nanos() > 0);

        self.config.measurement_time = dur;
        self
    }

    /// Changes the default number of resamples for benchmarks run with this runner.
    ///
    /// Number of resamples to use for the
    /// [bootstrap](http://en.wikipedia.org/wiki/Bootstrapping_(statistics)#Case_resampling).
    ///
    /// A larger number of resamples reduces the random sampling errors, which are inherent to the
    /// bootstrap method, but also increases the analysis time.
    ///
    /// # Panics
    ///
    /// Panics if the number of resamples is set to zero.
    pub fn nresamples(mut self, n: usize) -> Criterion {
        assert!(n > 0);

        self.config.nresamples = n;
        self
    }

    /// Changes the default noise threshold for benchmarks run with this runner.
    ///
    /// This threshold is used to decide if an increase of `X%` in the execution time is considered
    /// significant or should be flagged as noise.
    ///
    /// *Note:* A value of `0.02` is equivalent to `2%`.
    ///
    /// # Panics
    ///
    /// Panics if the threshold is set to a negative value.
    pub fn noise_threshold(mut self, threshold: f64) -> Criterion {
        assert!(threshold >= 0.0);

        self.config.noise_threshold = threshold;
        self
    }

    /// Changes the default confidence level for benchmarks run with this runner.
    ///
    /// The confidence level is used to calculate the
    /// [confidence intervals](https://en.wikipedia.org/wiki/Confidence_interval) of the estimated
    /// statistics.
    ///
    /// # Panics
    ///
    /// Panics if the confidence level is set to a value outside the `(0, 1)` range.
    pub fn confidence_level(mut self, cl: f64) -> Criterion {
        assert!(cl > 0.0 && cl < 1.0);

        self.config.confidence_level = cl;
        self
    }

    /// Changes the default [significance level](https://en.wikipedia.org/wiki/Statistical_significance)
    /// for benchmarks run with this runner.
    ///
    /// The significance level is used for
    /// [hypothesis testing](https://en.wikipedia.org/wiki/Statistical_hypothesis_testing).
    ///
    /// # Panics
    ///
    /// Panics if the significance level is set to a value outside the `(0, 1)` range.
    pub fn significance_level(mut self, sl: f64) -> Criterion {
        assert!(sl > 0.0 && sl < 1.0);

        self.config.significance_level = sl;
        self
    }

    /// Enables plotting
    #[cfg(feature = "html_reports")]
    pub fn with_plots(mut self) -> Criterion {
        use criterion_plot::VersionError;
        self.plotting = match criterion_plot::version() {
            Ok(_) => {
                let mut reports: Vec<Box<Report>> = vec![];
                reports.push(Box::new(CliReport::new(false, false, false)));
                reports.push(Box::new(FileCsvReport));
                reports.push(Box::new(Html::new()));
                self.report = Box::new(Reports::new(reports));
                Plotting::Enabled
            }
            Err(e) => {
                match e {
                    VersionError::Exec(_) => println!("Gnuplot not found, disabling plotting"),
                    e => println!("Gnuplot not found or not usable, disabling plotting\n{}", e),
                }
                Plotting::NotAvailable
            }
        };

        self
    }

    /// Enables plotting
    #[cfg(not(feature = "html_reports"))]
    pub fn with_plots(self) -> Criterion {
        self
    }

    /// Disables plotting
    pub fn without_plots(mut self) -> Criterion {
        self.plotting = Plotting::Disabled;
        self
    }

    /// Returns true if generation of the plots is possible.
    #[cfg(feature = "html_reports")]
    pub fn can_plot(&self) -> bool {
        match self.plotting {
            Plotting::NotAvailable => false,
            Plotting::Enabled => true,
            _ => criterion_plot::version().is_ok(),
        }
    }

    /// Returns true if generation of the plots is possible.
    #[cfg(not(feature = "html_reports"))]
    pub fn can_plot(&self) -> bool {
        false
    }

    /// Names an explicit baseline and enables overwriting the previous results.
    pub fn save_baseline(mut self, baseline: String) -> Criterion {
        self.baseline_directory = baseline;
        self.baseline = Baseline::Save;
        self
    }

    /// Names an explicit baseline and disables overwriting the previous results.
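    ///
    /// A sketch of comparing against a previously saved baseline (the baseline
    /// name is illustrative):
    ///
    /// ```rust
    /// use criterion::Criterion;
    ///
    /// let criterion = Criterion::default().retain_baseline("master".to_owned());
    /// ```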
    pub fn retain_baseline(mut self, baseline: String) -> Criterion {
        self.baseline_directory = baseline;
        self.baseline = Baseline::Compare;
        self
    }

    /// Filters the benchmarks. Only benchmarks with names that contain the
    /// given string will be executed.
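    ///
    /// A sketch of restricting a run to benchmarks whose names contain "fib"
    /// (the filter string is illustrative):
    ///
    /// ```rust
    /// use criterion::Criterion;
    ///
    /// let criterion = Criterion::default().with_filter("fib");
    /// ```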
    pub fn with_filter<S: Into<String>>(mut self, filter: S) -> Criterion {
        self.filter = Some(filter.into());

        self
    }

    /// Set the output directory (currently for testing only)
    #[doc(hidden)]
    pub fn output_directory(mut self, path: &std::path::Path) -> Criterion {
        self.output_directory = path.to_string_lossy().into_owned();

        self
    }

    /// Generate the final summary at the end of a run.
    #[doc(hidden)]
    pub fn final_summary(&self) {
        if self.profile_time.is_some() || self.test_mode {
            return;
        }

        let report_context = ReportContext {
            output_directory: self.output_directory.clone(),
            plotting: self.plotting,
            plot_config: PlotConfiguration::default(),
            test_mode: self.test_mode,
        };

        self.report.final_summary(&report_context);
    }

    /// Configure this criterion struct based on the command-line arguments to
    /// this process.
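    ///
    /// This is normally invoked for you by the `criterion_group!` macro; a sketch
    /// of calling it by hand looks like this:
    ///
    /// ```rust,no_run
    /// use criterion::Criterion;
    ///
    /// let criterion = Criterion::default().configure_from_args();
    /// ```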
    pub fn configure_from_args(mut self) -> Criterion {
        use clap::{App, Arg};
        let matches = App::new("Criterion Benchmark")
            .arg(Arg::with_name("FILTER")
                .help("Skip benchmarks whose names do not contain FILTER.")
                .index(1))
            .arg(Arg::with_name("color")
                .short("c")
                .long("color")
                .alias("colour")
                .takes_value(true)
                .possible_values(&["auto", "always", "never"])
                .default_value("auto")
                .help("Configure coloring of output. always = always colorize output, never = never colorize output, auto = colorize output if output is a tty and compiled for unix."))
            .arg(Arg::with_name("verbose")
                .short("v")
                .long("verbose")
                .help("Print additional statistical information."))
            .arg(Arg::with_name("noplot")
                .short("n")
                .long("noplot")
                .help("Disable plot and HTML generation."))
            .arg(Arg::with_name("save-baseline")
                .short("s")
                .long("save-baseline")
                .default_value("base")
                .help("Save results under a named baseline."))
            .arg(Arg::with_name("baseline")
                .short("b")
                .long("baseline")
                .takes_value(true)
                .conflicts_with("save-baseline")
                .help("Compare to a named baseline."))
            .arg(Arg::with_name("list")
                .long("list")
                .help("List all benchmarks"))
            .arg(Arg::with_name("measure-only")
                .long("measure-only")
                .hidden(true)
                .help("Only perform measurements; do no analysis or storage of results. This is useful, e.g., when profiling the benchmarks, to reduce clutter in the profiling data."))
            .arg(Arg::with_name("profile-time")
                .long("profile-time")
                .takes_value(true)
                .help("Iterate each benchmark for approximately the given number of seconds, doing no analysis and without storing the results. Useful for running the benchmarks in a profiler."))
            .arg(Arg::with_name("test")
                .long("test")
                .help("Run the benchmarks once, to verify that they execute successfully, but do not measure or report the results."))
            // Ignored but always passed to benchmark executables
            .arg(Arg::with_name("bench")
                .hidden(true)
                .long("bench"))
            .arg(Arg::with_name("version")
                .hidden(true)
                .short("V")
                .long("version"))
            .after_help("
This executable is a Criterion.rs benchmark.
See https://github.com/bheisler/criterion.rs for more details.

To enable debug output, define the environment variable CRITERION_DEBUG.
Criterion.rs will output more debug information and will save the gnuplot
scripts alongside the generated plots.
")
            .get_matches();

        if let Some(filter) = matches.value_of("FILTER") {
            self = self.with_filter(filter);
        }

        let verbose = matches.is_present("verbose");
        let stdout_isatty = atty::is(atty::Stream::Stdout);
        let mut enable_text_overwrite = stdout_isatty && !verbose && !debug_enabled();
        let enable_text_coloring;
        match matches.value_of("color") {
            Some("always") => {
                enable_text_coloring = true;
            }
            Some("never") => {
                enable_text_coloring = false;
                enable_text_overwrite = false;
            }
            _ => enable_text_coloring = stdout_isatty,
        }

        if matches.is_present("noplot") || matches.is_present("test") {
            self = self.without_plots();
        } else {
            self = self.with_plots();
        }

        if let Some(dir) = matches.value_of("save-baseline") {
            self.baseline = Baseline::Save;
            self.baseline_directory = dir.to_owned();
        }
        if let Some(dir) = matches.value_of("baseline") {
            self.baseline = Baseline::Compare;
            self.baseline_directory = dir.to_owned();
        }

        let mut reports: Vec<Box<Report>> = vec![];
        reports.push(Box::new(CliReport::new(
            enable_text_overwrite,
            enable_text_coloring,
            verbose,
        )));
        reports.push(Box::new(FileCsvReport));

        // TODO: Remove this in 0.3.0
        if matches.is_present("measure-only") {
            println!("Warning: The '--measure-only' argument is deprecated and will be removed in Criterion.rs 0.3.0. Use '--profile-time' instead.");
            self.profile_time = Some(Duration::from_secs(5));
        }
        if matches.is_present("profile-time") {
            let num_seconds = value_t!(matches.value_of("profile-time"), u64).unwrap_or_else(|e| {
                println!("{}", e);
                std::process::exit(1)
            });

            if num_seconds < 1 {
                println!("Profile time must be at least one second.");
                std::process::exit(1);
            }

            self.profile_time = Some(Duration::from_secs(num_seconds));
        }
        self.test_mode = matches.is_present("test");
        if matches.is_present("list") {
            self.test_mode = true;
            self.list_mode = true;
        }

        #[cfg(feature = "html_reports")]
        {
            if self.profile_time.is_none() {
                reports.push(Box::new(Html::new()));
            }
        }

        self.report = Box::new(Reports::new(reports));

        self
    }

    fn filter_matches(&self, id: &str) -> bool {
        match self.filter {
            Some(ref string) => id.contains(string),
            None => true,
        }
    }

    /// Benchmarks a function
    ///
    /// # Example
    ///
    /// ```rust
    /// # #[macro_use] extern crate criterion;
    /// # use self::criterion::*;
    ///
    /// fn bench(c: &mut Criterion) {
    ///     // Setup (construct data, allocate memory, etc)
    ///     c.bench_function(
    ///         "function_name",
    ///         |b| b.iter(|| {
    ///             // Code to benchmark goes here
    ///         }),
    ///     );
    /// }
    ///
    /// criterion_group!(benches, bench);
    /// criterion_main!(benches);
    /// ```
    pub fn bench_function<F>(&mut self, id: &str, f: F) -> &mut Criterion
    where
        F: FnMut(&mut Bencher) + 'static,
    {
        self.bench(id, Benchmark::new(id, f))
    }

    /// Benchmarks multiple functions
    ///
    /// All functions get the same input and are compared with the other implementations.
    /// Works similarly to `bench_function`, but with multiple functions.
    ///
    /// # Example
    ///
    /// ```rust
    /// # #[macro_use] extern crate criterion;
    /// # use self::criterion::*;
    /// # fn seq_fib(i: &u32) {}
    /// # fn par_fib(i: &u32) {}
    ///
    /// fn bench_seq_fib(b: &mut Bencher, i: &u32) {
    ///     b.iter(|| {
    ///         seq_fib(i);
    ///     });
    /// }
    ///
    /// fn bench_par_fib(b: &mut Bencher, i: &u32) {
    ///     b.iter(|| {
    ///         par_fib(i);
    ///     });
    /// }
    ///
    /// fn bench(c: &mut Criterion) {
    ///     let sequential_fib = Fun::new("Sequential", bench_seq_fib);
    ///     let parallel_fib = Fun::new("Parallel", bench_par_fib);
    ///     let funs = vec![sequential_fib, parallel_fib];
    ///
    ///     c.bench_functions("Fibonacci", funs, 14);
    /// }
    ///
    /// criterion_group!(benches, bench);
    /// criterion_main!(benches);
    /// ```
    pub fn bench_functions<I>(&mut self, id: &str, funs: Vec<Fun<I>>, input: I) -> &mut Criterion
    where
        I: fmt::Debug + 'static,
    {
        let benchmark = ParameterizedBenchmark::with_functions(
            funs.into_iter().map(|fun| fun.f).collect(),
            vec![input],
        );

        self.bench(id, benchmark)
    }

    /// Benchmarks a function under various inputs
    ///
    /// This is a convenience method to execute several related benchmarks. Each benchmark will
    /// receive the id: `${id}/${input}`.
    ///
    /// # Example
    ///
    /// ```rust
    /// # #[macro_use] extern crate criterion;
    /// # use self::criterion::*;
    ///
    /// fn bench(c: &mut Criterion) {
    ///     c.bench_function_over_inputs("from_elem",
    ///         |b: &mut Bencher, size: &usize| {
    ///             b.iter(|| vec![0u8; *size]);
    ///         },
    ///         vec![1024, 2048, 4096]
    ///     );
    /// }
    ///
    /// criterion_group!(benches, bench);
    /// criterion_main!(benches);
    /// ```
    pub fn bench_function_over_inputs<I, F>(&mut self, id: &str, f: F, inputs: I) -> &mut Criterion
    where
        I: IntoIterator,
        I::Item: fmt::Debug + 'static,
        F: FnMut(&mut Bencher, &I::Item) + 'static,
    {
        self.bench(id, ParameterizedBenchmark::new(id, f, inputs))
    }

    /// Benchmarks an external program
    ///
    /// The external program must:
    ///
    /// * Read the number of iterations from stdin
    /// * Execute the routine to benchmark that many times
    /// * Print the elapsed time (in nanoseconds) to stdout
    ///
    /// ```rust,no_run
    /// # use std::io::{self, BufRead};
    /// # use std::time::Instant;
    /// # use std::time::Duration;
    /// # trait DurationExt { fn to_nanos(&self) -> u64 { 0 } }
    /// # impl DurationExt for Duration {}
    /// // Example of an external program that implements this protocol
    ///
    /// fn main() {
    ///     let stdin = io::stdin();
    ///     let ref mut stdin = stdin.lock();
    ///
    ///     // For each line in stdin
    ///     for line in stdin.lines() {
    ///         // Parse line as the number of iterations
    ///         let iters: u64 = line.unwrap().trim().parse().unwrap();
    ///
    ///         // Setup
    ///
    ///         // Benchmark
    ///         let start = Instant::now();
    ///         // Execute the routine "iters" times
    ///         for _ in 0..iters {
    ///             // Code to benchmark goes here
    ///         }
    ///         let elapsed = start.elapsed();
    ///
    ///         // Teardown
    ///
    ///         // Report elapsed time in nanoseconds to stdout
    ///         println!("{}", elapsed.to_nanos());
    ///     }
    /// }
    /// ```
    #[deprecated(
        since = "0.2.6",
        note = "External program benchmarks were rarely used and are awkward to maintain, so they are scheduled for deletion in 0.3.0"
    )]
    #[allow(deprecated)]
    pub fn bench_program(&mut self, id: &str, program: Command) -> &mut Criterion {
        self.bench(id, Benchmark::new_external(id, program))
    }

    /// Benchmarks an external program under various inputs
    ///
    /// This is a convenience method to execute several related benchmarks. Each benchmark will
    /// receive the id: `${id}/${input}`.
    #[deprecated(
        since = "0.2.6",
        note = "External program benchmarks were rarely used and are awkward to maintain, so they are scheduled for deletion in 0.3.0"
    )]
    #[allow(deprecated)]
    pub fn bench_program_over_inputs<I, F>(
        &mut self,
        id: &str,
        mut program: F,
        inputs: I,
    ) -> &mut Criterion
    where
        F: FnMut() -> Command + 'static,
        I: IntoIterator,
        I::Item: fmt::Debug + 'static,
    {
        self.bench(
            id,
            ParameterizedBenchmark::new_external(
                id,
                move |i| {
                    let mut command = program();
                    command.arg(format!("{:?}", i));
                    command
                },
                inputs,
            ),
        )
    }

    /// Executes the given benchmark. Use this variant to execute benchmarks
    /// with complex configuration. This can be used to compare multiple
    /// functions, execute benchmarks with custom configuration settings and
    /// more. See the `Benchmark` and `ParameterizedBenchmark` structs for more
    /// information.
    ///
    /// ```rust
    /// # #[macro_use] extern crate criterion;
    /// # use criterion::*;
    /// # fn routine_1() {}
    /// # fn routine_2() {}
    ///
    /// fn bench(c: &mut Criterion) {
    ///     // Setup (construct data, allocate memory, etc)
    ///     c.bench(
    ///         "routines",
    ///         Benchmark::new("routine_1", |b| b.iter(|| routine_1()))
    ///             .with_function("routine_2", |b| b.iter(|| routine_2()))
    ///             .sample_size(50)
    ///     );
    /// }
    ///
    /// criterion_group!(benches, bench);
    /// criterion_main!(benches);
    /// ```
    pub fn bench<B: BenchmarkDefinition>(
        &mut self,
        group_id: &str,
        benchmark: B,
    ) -> &mut Criterion {
        benchmark.run(group_id, self);
        self
    }
}

mod plotting {
    #[derive(Debug, Clone, Copy)]
    pub enum Plotting {
        Unset,
        Disabled,
        Enabled,
        NotAvailable,
    }

    impl Plotting {
        pub fn is_enabled(self) -> bool {
            match self {
                Plotting::Enabled => true,
                _ => false,
            }
        }
    }
}

trait DurationExt {
    fn to_nanos(&self) -> u64;
}

const NANOS_PER_SEC: u64 = 1_000_000_000;

impl DurationExt for Duration {
    fn to_nanos(&self) -> u64 {
        self.as_secs() * NANOS_PER_SEC + u64::from(self.subsec_nanos())
    }
}

#[derive(Clone, Copy, PartialEq, Deserialize, Serialize, Debug)]
struct ConfidenceInterval {
    confidence_level: f64,
    lower_bound: f64,
    upper_bound: f64,
}

#[derive(Clone, Copy, PartialEq, Deserialize, Serialize, Debug)]
struct Estimate {
    /// The confidence interval for this estimate
    confidence_interval: ConfidenceInterval,
    /// The point estimate for this statistic
    point_estimate: f64,
    /// The standard error of this estimate
    standard_error: f64,
}

fn build_estimates(
    distributions: &Distributions,
    points: &BTreeMap<Statistic, f64>,
    cl: f64,
) -> Estimates {
    distributions
        .iter()
        .map(|(&statistic, distribution)| {
            let point_estimate = points[&statistic];
            let (lb, ub) = distribution.confidence_interval(cl);

            (
                statistic,
                Estimate {
                    confidence_interval: ConfidenceInterval {
                        confidence_level: cl,
                        lower_bound: lb,
                        upper_bound: ub,
                    },
                    point_estimate,
                    standard_error: distribution.std_dev(None),
                },
            )
        })
        .collect()
}

/// Enum representing different ways of measuring the throughput of benchmarked code.
/// If the throughput setting is configured for a benchmark then the estimated throughput will
/// be reported as well as the time per iteration.
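///
/// # Example
///
/// A sketch of attaching a byte throughput to a benchmark (assuming the
/// `Benchmark::throughput` builder method; the routine shown is illustrative):
///
/// ```rust
/// use criterion::{Benchmark, Throughput};
///
/// let data = vec![0u8; 4096];
/// let benchmark = Benchmark::new("checksum", move |b| {
///     b.iter(|| data.iter().map(|&x| x as u64).sum::<u64>())
/// })
/// .throughput(Throughput::Bytes(4096));
/// ```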
#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum Throughput {
    /// Measure throughput in terms of bytes/second. The value should be the number of bytes
    /// processed by one iteration of the benchmarked code. Typically, this would be the length of
    /// an input string or `&[u8]`.
    Bytes(u32),

    /// Measure throughput in terms of elements/second. The value should be the number of elements
    /// processed by one iteration of the benchmarked code. Typically, this would be the size of a
    /// collection, but could also be the number of lines of input text or the number of values to
    /// parse.
    Elements(u32),
}

/// Axis scaling type
#[derive(Debug, Clone, Copy)]
pub enum AxisScale {
    /// Axes scale linearly
    Linear,

    /// Axes scale logarithmically
    Logarithmic,
}

/// Contains the configuration options for the plots generated by a particular benchmark
/// or benchmark group.
///
/// ```rust
/// use self::criterion::{Bencher, Criterion, Benchmark, PlotConfiguration, AxisScale};
///
/// let plot_config = PlotConfiguration::default()
///     .summary_scale(AxisScale::Logarithmic);
///
/// Benchmark::new("test", |b| b.iter(|| 10))
///     .plot_config(plot_config);
/// ```
#[derive(Debug, Clone)]
pub struct PlotConfiguration {
    summary_scale: AxisScale,
}

impl Default for PlotConfiguration {
    fn default() -> PlotConfiguration {
        PlotConfiguration {
            summary_scale: AxisScale::Linear,
        }
    }
}

impl PlotConfiguration {
    /// Set the axis scale (linear or logarithmic) for the summary plots. Typically, you would
    /// set this to logarithmic if benchmarking over a range of inputs which scale exponentially.
    /// Defaults to linear.
    pub fn summary_scale(mut self, new_scale: AxisScale) -> PlotConfiguration {
        self.summary_scale = new_scale;
        self
    }
}