rand_jitter/lib.rs

// Copyright 2018 Developers of the Rand project.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//
// Based on jitterentropy-library, http://www.chronox.de/jent.html.
// Copyright Stephan Mueller <smueller@chronox.de>, 2014 - 2017.
//
// With permission from Stephan Mueller to relicense the Rust translation under
// the MIT license.

//! Non-physical true random number generator based on timing jitter.
//!
//! This is a true random number generator, as opposed to pseudo-random
//! generators. Random numbers generated by `JitterRng` can be seen as fresh
//! entropy. A consequence is that it is orders of magnitude slower than `OsRng`
//! and PRNGs (about 10<sup>3</sup>..10<sup>6</sup> times slower).
//!
//! There are very few situations where using this RNG is appropriate. Only very
//! few applications require true entropy. A normal PRNG can be statistically
//! indistinguishable, and a cryptographic PRNG should be just as impossible to
//! predict.
//!
//! Use of `JitterRng` is recommended for initializing cryptographic PRNGs when
//! `OsRng` is not available.
//!
//! `JitterRng` can be used without the standard library, but not conveniently:
//! you must provide a high-precision timer and carefully follow the
//! instructions of [`JitterRng::new_with_timer`].
//!
//! This implementation is based on [Jitterentropy] version 2.1.0.
//!
//! Note: There is no accurate timer available on WASM platforms, to help
//! prevent fingerprinting or timing side-channel attacks. Therefore
//! [`JitterRng::new()`] is not available on WASM. It is also unavailable
//! when the `std` feature is disabled.
//!
//! [Jitterentropy]: http://www.chronox.de/jent.html
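//!
//! # Example
//!
//! A minimal sketch of typical use, assuming the `std` feature is enabled
//! (the block is marked `ignore` because that feature may be disabled):
//!
//! ```ignore
//! use rand_jitter::JitterRng;
//! use rand_jitter::rand_core::RngCore;
//!
//! // `new()` runs the timer quality tests and fails if no suitable
//! // high-resolution timer is available.
//! let mut rng = JitterRng::new().expect("failed to initialize JitterRng");
//! let mut seed = [0u8; 32];
//! rng.fill_bytes(&mut seed);
//! // `seed` can now be used to initialize a cryptographic PRNG.
//! ```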

#![doc(html_logo_url = "https://www.rust-lang.org/logos/rust-logo-128x128-blk.png",
       html_favicon_url = "https://www.rust-lang.org/favicon.ico",
       html_root_url = "https://rust-random.github.io/rand/")]

#![deny(missing_docs)]
#![deny(missing_debug_implementations)]
#![doc(test(attr(allow(unused_variables), deny(warnings))))]

// Note: the C implementation of `Jitterentropy` relies on being compiled
// without optimizations. This implementation goes to some lengths to keep the
// compiler from optimizing out code which does influence timing jitter, but is
// technically dead code.
#![no_std]
pub extern crate rand_core;
#[cfg(feature = "std")]
extern crate std;
#[cfg(feature = "log")]
#[macro_use] extern crate log;
#[cfg(any(target_os = "macos", target_os = "ios"))]
extern crate libc;
#[cfg(target_os = "windows")]
extern crate winapi;


#[cfg(not(feature = "log"))]
#[macro_use] mod dummy_log;
#[cfg(feature = "std")]
mod platform;
mod error;

use rand_core::{RngCore, CryptoRng, Error, impls};
pub use error::TimerError;

use core::{fmt, mem, ptr};
#[cfg(feature = "std")]
use std::sync::atomic::{AtomicUsize, ATOMIC_USIZE_INIT, Ordering};

const MEMORY_BLOCKS: usize = 64;
const MEMORY_BLOCKSIZE: usize = 32;
const MEMORY_SIZE: usize = MEMORY_BLOCKS * MEMORY_BLOCKSIZE;

/// A true random number generator based on jitter in the CPU execution time,
/// and jitter in memory access time.
pub struct JitterRng {
    data: u64, // Actual random number
    // Number of rounds to run the entropy collector per 64 bits
    rounds: u8,
    // Timer used by `measure_jitter`
    timer: fn() -> u64,
    // Memory for the Memory Access noise source
    mem_prev_index: u16,
    // Make `next_u32` not waste 32 bits
    data_half_used: bool,
}

// Note: `JitterRng` maintains a small 64-bit entropy pool. With every
// `generate` 64 new bits should be integrated in the pool. If a round of
// `generate` were to collect less than the expected 64 bits, then the returned
// value, and the new state of the entropy pool, would be in some way related to
// the initial state. It is therefore better if the initial state of the entropy
// pool is different on each call to `generate`. This has a few implications:
// - `generate` should be called once before using `JitterRng` to produce the
//   first usable value (this is done by default in `new`);
// - We do not zero the entropy pool after generating a result. The reference
//   implementation also does not support zeroing, but recommends generating a
//   new value without using it if you want to protect a previously generated
//   'secret' value from someone inspecting the memory;
// - Implementing `Clone` seems acceptable, as it would not cause the systematic
//   bias a constant might cause. Only instead of one value that could be
//   potentially related to the same initial state, there are now two.

// Entropy collector state.
// These values are not necessary to preserve across runs.
struct EcState {
    // Previous time stamp to determine the timer delta
    prev_time: u64,
    // Deltas used for the stuck test
    last_delta: i32,
    last_delta2: i32,
    // Memory for the Memory Access noise source
    mem: [u8; MEMORY_SIZE],
}

impl EcState {
    // Stuck test by checking the:
    // - 1st derivative of the jitter measurement (time delta)
    // - 2nd derivative of the jitter measurement (delta of time deltas)
    // - 3rd derivative of the jitter measurement (delta of delta of time
    //   deltas)
    //
    // All values must always be non-zero.
    // This test is a heuristic to see whether the last measurement holds
    // entropy.
    fn stuck(&mut self, current_delta: i32) -> bool {
        let delta2 = self.last_delta - current_delta;
        let delta3 = delta2 - self.last_delta2;

        self.last_delta = current_delta;
        self.last_delta2 = delta2;

        current_delta == 0 || delta2 == 0 || delta3 == 0
    }
}

// Custom Debug implementation that does not expose the internal state
impl fmt::Debug for JitterRng {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "JitterRng {{}}")
    }
}

impl Clone for JitterRng {
    fn clone(&self) -> JitterRng {
        JitterRng {
            data: self.data,
            rounds: self.rounds,
            timer: self.timer,
            mem_prev_index: self.mem_prev_index,
            // The 32 bits that may still be unused from the previous round are
            // for the original to use, not for the clone.
            data_half_used: false,
        }
    }
}

// Initialise to zero; must be positive
#[cfg(all(feature = "std", not(target_arch = "wasm32")))]
static JITTER_ROUNDS: AtomicUsize = ATOMIC_USIZE_INIT;

impl JitterRng {
    /// Create a new `JitterRng`. Makes use of `std::time` for a timer, or a
    /// platform-specific function with higher accuracy if necessary and
    /// available.
    ///
    /// During initialization, CPU execution timing jitter is measured a few
    /// hundred times. If this does not pass basic quality tests, an error is
    /// returned. The test result is cached to make subsequent calls faster.
    #[cfg(all(feature = "std", not(target_arch = "wasm32")))]
    pub fn new() -> Result<JitterRng, TimerError> {
        if cfg!(target_arch = "wasm32") {
            return Err(TimerError::NoTimer);
        }
        let mut state = JitterRng::new_with_timer(platform::get_nstime);
        let mut rounds = JITTER_ROUNDS.load(Ordering::Relaxed) as u8;
        if rounds == 0 {
            // No result yet: run test.
            // This allows the timer test to run multiple times; we don't care.
            rounds = state.test_timer()?;
            JITTER_ROUNDS.store(rounds as usize, Ordering::Relaxed);
            info!("JitterRng: using {} rounds per u64 output", rounds);
        }
        state.set_rounds(rounds);

        // Fill `data` with a non-zero value.
        state.gen_entropy();
        Ok(state)
    }

    /// Create a new `JitterRng`.
    /// A custom timer can be supplied, making it possible to use `JitterRng` in
    /// `no_std` environments.
    ///
    /// The timer must have nanosecond precision.
    ///
    /// This method is lower-level than `new()`. It is the responsibility of
    /// the caller to run [`test_timer`] before using any numbers generated with
    /// `JitterRng`, and optionally call [`set_rounds`]. It is also important to
    /// consume at least one `u64` before using the first result, to initialize
    /// the entropy collection pool.
    ///
    /// # Example
    ///
    /// ```
    /// # use rand_jitter::rand_core::{RngCore, Error};
    /// use rand_jitter::JitterRng;
    ///
    /// # fn try_inner() -> Result<(), Error> {
    /// fn get_nstime() -> u64 {
    ///     use std::time::{SystemTime, UNIX_EPOCH};
    ///
    ///     let dur = SystemTime::now().duration_since(UNIX_EPOCH).unwrap();
    ///     // The correct way to calculate the current time is
    ///     // `dur.as_secs() * 1_000_000_000 + dur.subsec_nanos() as u64`
    ///     // But this is faster, and the difference in terms of entropy is
    ///     // negligible (log2(10^9) == 29.9).
    ///     dur.as_secs() << 30 | dur.subsec_nanos() as u64
    /// }
    ///
    /// let mut rng = JitterRng::new_with_timer(get_nstime);
    /// let rounds = rng.test_timer()?;
    /// rng.set_rounds(rounds); // optional
    /// let _ = rng.next_u64();
    ///
    /// // Ready for use
    /// let v: u64 = rng.next_u64();
    /// # Ok(())
    /// # }
    ///
    /// # let _ = try_inner();
    /// ```
    ///
    /// [`test_timer`]: JitterRng::test_timer
    /// [`set_rounds`]: JitterRng::set_rounds
    pub fn new_with_timer(timer: fn() -> u64) -> JitterRng {
        JitterRng {
            data: 0,
            rounds: 64,
            timer,
            mem_prev_index: 0,
            data_half_used: false,
        }
    }

    /// Configures how many rounds are used to generate each 64-bit value.
    /// This must be greater than zero, and has a big impact on performance
    /// and output quality.
    ///
    /// [`new_with_timer`] conservatively uses 64 rounds, but often fewer rounds
    /// can be used. The `test_timer()` function returns the minimum number of
    /// rounds required for full strength (platform dependent), so one may use
    /// `rng.set_rounds(rng.test_timer()?);` or cache the value.
    ///
    /// [`new_with_timer`]: JitterRng::new_with_timer
    pub fn set_rounds(&mut self, rounds: u8) {
        assert!(rounds > 0);
        self.rounds = rounds;
    }

    // Calculate a random loop count used for the next round of an entropy
    // collection, based on bits from a fresh value from the timer.
    //
    // The timer is folded to produce a number that contains at most `n_bits`
    // bits.
    //
    // Note: A constant should be added to the resulting random loop count to
    // prevent loops that run 0 times.
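    //
    // For example, with `n_bits = 4` (the value the callers below pass in),
    // `mask` covers one nibble and `folds` is 16, so each iteration XORs the
    // next 4-bit nibble of `time ^ self.data` into `rounds`; every bit of the
    // 64-bit time stamp thus influences the small loop count.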
    #[inline(never)]
    fn random_loop_cnt(&mut self, n_bits: u32) -> u32 {
        let mut rounds = 0;

        let mut time = (self.timer)();
        // Mix with the current state of the random number to balance the
        // random loop counter a bit more.
        time ^= self.data;

        // We fold the time value as much as possible to ensure that as many
        // bits of the time stamp are included as possible.
        let folds = (64 + n_bits - 1) / n_bits;
        let mask = (1 << n_bits) - 1;
        for _ in 0..folds {
            rounds ^= time & mask;
            time >>= n_bits;
        }

        rounds as u32
    }

    // CPU jitter noise source
    // Noise source based on the CPU execution time jitter
    //
    // This function injects the individual bits of the time value into the
    // entropy pool using an LFSR.
    //
    // The code is deliberately inefficient with respect to the bit shifting.
    // This function not only acts as a folding operation, but this function's
    // execution is used to measure the CPU execution time jitter. Any change to
    // the loop in this function implies that careful retesting must be done.
    #[inline(never)]
    fn lfsr_time(&mut self, time: u64, var_rounds: bool) {
        fn lfsr(mut data: u64, time: u64) -> u64 {
            for i in 1..65 {
                let mut tmp = time << (64 - i);
                tmp >>= 64 - 1;

                // Fibonacci LFSR with polynomial of
                // x^64 + x^61 + x^56 + x^31 + x^28 + x^23 + 1 which is
                // primitive according to
                // http://poincare.matf.bg.ac.rs/~ezivkovm/publications/primpol1.pdf
                // (the shift values are the polynomial values minus one
                // due to counting bits from 0 to 63). As the current
                // position is always the LSB, the polynomial only needs
                // to shift data in from the left without wrap.
                data ^= tmp;
                data ^= (data >> 63) & 1;
                data ^= (data >> 60) & 1;
                data ^= (data >> 55) & 1;
                data ^= (data >> 30) & 1;
                data ^= (data >> 27) & 1;
                data ^= (data >> 22) & 1;
                data = data.rotate_left(1);
            }
            data
        }

        // Note: in the reference implementation only the last round affects
        // `self.data`; all the other results are ignored. To make sure the
        // other rounds are not optimised out, we first run all but the last
        // round on a throw-away value instead of the real `self.data`.
        let mut lfsr_loop_cnt = 0;
        if var_rounds { lfsr_loop_cnt = self.random_loop_cnt(4) };

        let mut throw_away: u64 = 0;
        for _ in 0..lfsr_loop_cnt {
            throw_away = lfsr(throw_away, time);
        }
        black_box(throw_away);

        self.data = lfsr(self.data, time);
    }

    // Memory Access noise source
    // This is a noise source based on variations in memory access times
    //
    // This function performs memory accesses which will add to the timing
    // variations due to an unknown amount of CPU wait states that need to be
    // added when accessing memory. The memory size should be larger than the L1
    // caches as outlined in the documentation and the associated testing.
    //
    // The L1 cache has a very high bandwidth, albeit its access rate is usually
    // slower than accessing CPU registers. Therefore, L1 accesses only add
    // minimal variations as the CPU hardly has to wait. Starting with L2,
    // significant variations are added because L2 typically does not belong to
    // the CPU any more and therefore a wider range of CPU wait states is
    // necessary for accesses. L3 and real memory accesses have an even wider
    // range of wait states. However, to reliably access either L3 or memory,
    // the `self.mem` memory must be quite large, which is usually not desirable.
    #[inline(never)]
    fn memaccess(&mut self, mem: &mut [u8; MEMORY_SIZE], var_rounds: bool) {
        let mut acc_loop_cnt = 128;
        if var_rounds { acc_loop_cnt += self.random_loop_cnt(4) };

        let mut index = self.mem_prev_index as usize;
        for _ in 0..acc_loop_cnt {
            // Addition of MEMORY_BLOCKSIZE - 1 to index with wrap-around logic
            // to ensure that every memory location is hit evenly.
            // The modulus also allows the compiler to remove the indexing
            // bounds check.
            index = (index + MEMORY_BLOCKSIZE - 1) % MEMORY_SIZE;

            // memory access: just add 1 to one byte
            // memory access implies read from and write to memory location
            mem[index] = mem[index].wrapping_add(1);
        }
        self.mem_prev_index = index as u16;
    }

    // This is the heart of the entropy generation: calculate time deltas and
    // use the CPU jitter in the time deltas. The jitter is injected into the
    // entropy pool.
    //
    // Ensure that `ec.prev_time` is primed before using the output of this
    // function. This can be done by calling this function and not using its
    // result.
    fn measure_jitter(&mut self, ec: &mut EcState) -> Option<()> {
        // Invoke one noise source before time measurement to add variations
        self.memaccess(&mut ec.mem, true);

        // Get time stamp and calculate time delta to previous
        // invocation to measure the timing variations
        let time = (self.timer)();
        // Note: wrapping_sub combined with a cast to `i64` generates a correct
        // delta, even in the unlikely case this is a timer that is not strictly
        // monotonic.
        let current_delta = time.wrapping_sub(ec.prev_time) as i64 as i32;
        ec.prev_time = time;

        // Call the next noise source which also injects the data
        self.lfsr_time(current_delta as u64, true);

        // Check whether we have a stuck measurement (i.e. whether the last
        // measurement holds entropy).
        if ec.stuck(current_delta) { return None };

        // Rotate the data buffer by a prime number (any odd number would
        // do) to ensure that every bit position of the input time stamp
        // has an even chance of being merged with a bit position in the
        // entropy pool. We do not use one here as the adjacent bits in
        // successive time deltas may have some form of dependency. The
        // chosen value of 7 implies that the low 7 bits of the next
        // time delta value are concatenated with the current time delta.
        self.data = self.data.rotate_left(7);

        Some(())
    }

    // Shuffle the pool a bit by mixing some value with a bijective function
    // (XOR) into the pool.
    //
    // The function generates a mixer value that depends on the bits set and
    // the location of the set bits in the random number generated by the
    // entropy source. Therefore, based on the generated random number, this
    // mixer value can have 2^64 different values. The mixer value is derived
    // from SHA-1 initialization constants (see the comments in the function
    // body). After obtaining the mixer value, it is XORed into the random
    // number.
    //
    // The mixer value is not assumed to contain any entropy. But due to the
    // XOR operation, it also cannot destroy any entropy present in the
    // entropy pool.
    #[inline(never)]
    fn stir_pool(&mut self) {
        // This constant is derived from the first two 32 bit initialization
        // vectors of SHA-1 as defined in FIPS 180-4 section 5.3.1
        // The order does not really matter as we do not rely on the specific
        // numbers. We just pick the SHA-1 constants as they have a good mix of
        // set and unset bits.
        const CONSTANT: u64 = 0x67452301efcdab89;

        // The start value of the mixer variable is derived from the third
        // and fourth 32 bit initialization vector of SHA-1 as defined in
        // FIPS 180-4 section 5.3.1
        let mut mixer = 0x98badcfe10325476;

        // This is a constant time function to prevent leaking timing
        // information about the random number.
        // The normal code is:
        // ```
        // for i in 0..64 {
        //     if ((self.data >> i) & 1) == 1 { mixer ^= CONSTANT; }
        // }
        // ```
        // This is a bit fragile, as LLVM really wants to use branches here, and
        // we rely on it to not recognise the opportunity.
        for i in 0..64 {
            let apply = (self.data >> i) & 1;
            let mask = !apply.wrapping_sub(1);
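            // `apply` is either 0 or 1, so `!apply.wrapping_sub(1)` is a mask
            // of all ones or all zeros; the XOR below is thus applied
            // conditionally without a data-dependent branch.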
            mixer ^= CONSTANT & mask;
            mixer = mixer.rotate_left(1);
        }

        self.data ^= mixer;
    }

    fn gen_entropy(&mut self) -> u64 {
        trace!("JitterRng: collecting entropy");

        // Prime `ec.prev_time`, and run the noise sources to make sure the
        // first loop round collects the expected entropy.
        let mut ec = EcState {
            prev_time: (self.timer)(),
            last_delta: 0,
            last_delta2: 0,
            mem: [0; MEMORY_SIZE],
        };
        let _ = self.measure_jitter(&mut ec);

        for _ in 0..self.rounds {
            // If a stuck measurement is received, repeat the measurement.
            // Note: we do not guard against an infinite loop; that would mean
            // the timer suddenly became broken.
            while self.measure_jitter(&mut ec).is_none() {}
        }

        // Do a single read from `self.mem` to make sure the Memory Access noise
        // source is not optimised out.
        black_box(ec.mem[0]);

        self.stir_pool();
        self.data
    }

    /// Basic quality tests on the timer, by measuring CPU timing jitter a few
    /// hundred times.
    ///
    /// If successful, this will return the estimated number of rounds necessary
    /// to collect 64 bits of entropy. Otherwise a [`TimerError`] with the cause
    /// of the failure will be returned.
    pub fn test_timer(&mut self) -> Result<u8, TimerError> {
        debug!("JitterRng: testing timer ...");
        // We could add a check for system capabilities such as `clock_getres`
        // or check for `CONFIG_X86_TSC`, but it does not make much sense as the
        // following sanity checks verify that we have a high-resolution timer.

        let mut delta_sum = 0;
        let mut old_delta = 0;

        let mut time_backwards = 0;
        let mut count_mod = 0;
        let mut count_stuck = 0;

        let mut ec = EcState {
            prev_time: (self.timer)(),
            last_delta: 0,
            last_delta2: 0,
            mem: [0; MEMORY_SIZE],
        };

        // TESTLOOPCOUNT needs some loops to identify edge systems.
        // 100 is definitely too little.
        const TESTLOOPCOUNT: u64 = 300;
        const CLEARCACHE: u64 = 100;

        for i in 0..(CLEARCACHE + TESTLOOPCOUNT) {
            // Measure time delta of core entropy collection logic
            let time = (self.timer)();
            self.memaccess(&mut ec.mem, true);
            self.lfsr_time(time, true);
            let time2 = (self.timer)();

            // Test whether timer works
            if time == 0 || time2 == 0 {
                return Err(TimerError::NoTimer);
            }
            let delta = time2.wrapping_sub(time) as i64 as i32;

            // Test whether the timer is fine-grained enough to provide a
            // non-zero delta even when called in quick succession -- this
            // implies that we also have a high-resolution timer
            if delta == 0 {
                return Err(TimerError::CoarseTimer);
            }

            // Up to here we did not modify any variable that will be
            // evaluated later, but we already performed some work. Thus we
            // have already had an impact on the caches, branch prediction,
            // etc., with the goal of clearing them to get worst-case
            // measurements.
            if i < CLEARCACHE { continue; }

            if ec.stuck(delta) { count_stuck += 1; }

            // Test whether we have an increasing timer.
            if !(time2 > time) { time_backwards += 1; }

            // Count the number of times the time delta is a multiple of 100,
            // i.e. the counter appears to increase in steps of 100ns or greater.
            if (delta % 100) == 0 { count_mod += 1; }

            // Ensure that we have a varying delta timer which is necessary for
            // the calculation of entropy -- perform this check only after the
            // first loop is executed as we need to prime the old_delta value
            delta_sum += (delta - old_delta).abs() as u64;
            old_delta = delta;
        }

        // Do a single read from `self.mem` to make sure the Memory Access noise
        // source is not optimised out.
        black_box(ec.mem[0]);

        // We allow time to run backwards up to three times.
        // This can happen if the clock is being adjusted by NTP operations.
        // If such an operation just happens to interfere with our test, it
        // should not fail. The value of 3 should cover the NTP case being
        // performed during our test run.
        if time_backwards > 3 {
            return Err(TimerError::NotMonotonic);
        }

        // Test that the available amount of entropy per round does not get too
        // low. We expect 1 bit of entropy per round as a reasonable minimum
        // (although less is possible, it means the collector loop has to run
        // much more often).
        // `assert!(delta_average >= log2(1))`
        // `assert!(delta_sum / TESTLOOPCOUNT >= 1)`
        // `assert!(delta_sum >= TESTLOOPCOUNT)`
        if delta_sum < TESTLOOPCOUNT {
            return Err(TimerError::TinyVariantions);
        }

        // Ensure that we have variations in the time stamp below 100 for at
        // least 10% of all checks -- on some platforms, the counter increments
        // in multiples of 100, but not always
        if count_mod > (TESTLOOPCOUNT * 9 / 10) {
            return Err(TimerError::CoarseTimer);
        }

        // If we have more than 90% stuck results, then this Jitter RNG is
        // unlikely to work well.
        if count_stuck > (TESTLOOPCOUNT * 9 / 10) {
            return Err(TimerError::TooManyStuck);
        }

        // Estimate the number of `measure_jitter` rounds necessary for 64 bits
        // of entropy.
        //
        // We don't try very hard to come up with a good estimate of the
        // available bits of entropy per round here, for two reasons:
        // 1. Simple estimates of the available bits (like Shannon entropy) are
        //    too optimistic.
        // 2. Unless we want to waste a lot of time during initialization, only
        //    a small number of samples is available.
        //
        // Therefore we use a very simple and conservative estimate:
        // `let bits_of_entropy = log2(delta_average) / 2`.
        //
        // The number of rounds `measure_jitter` should run to collect 64 bits
        // of entropy is `64 / bits_of_entropy`.
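        //
        // For example (hypothetical numbers): a `delta_average` of 100 gives
        // `log2 = 7`, so the estimate below yields roundup(128 / 7) = 19
        // rounds per 64-bit output.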
        let delta_average = delta_sum / TESTLOOPCOUNT;

        if delta_average >= 16 {
            let log2 = 64 - delta_average.leading_zeros();
            // Do something similar to roundup(64/(log2/2)):
            Ok(((64u32 * 2 + log2 - 1) / log2) as u8)
        } else {
            // For values < 16 the rounding error becomes too large, so use a
            // lookup table.
            // Values 0 and 1 are invalid, and filtered out by the
            // `delta_sum < TESTLOOPCOUNT` test above.
            let log2_lookup = [0,  0, 128, 81, 64, 56, 50, 46,
                               43, 41, 39, 38, 36, 35, 34, 33];
            Ok(log2_lookup[delta_average as usize])
        }
    }

    /// Statistical test: return the timer delta of one normal run of the
    /// `JitterRng` entropy collector.
    ///
    /// Setting `var_rounds` to `true` will execute the memory access and the
    /// CPU jitter noise sources a variable number of times (just like a real
    /// `JitterRng` round).
    ///
    /// Setting `var_rounds` to `false` will execute the noise sources the
    /// minimal number of times. This can be used to measure the minimum amount
    /// of entropy one round of the entropy collector can collect in the worst
    /// case.
    ///
    /// See this crate's README on how to use `timer_stats` to test the quality
    /// of `JitterRng`.
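    ///
    /// # Example
    ///
    /// A minimal sketch of collecting deltas for external analysis (the timer
    /// function here is only illustrative; see [`new_with_timer`] for its
    /// trade-offs):
    ///
    /// ```
    /// use rand_jitter::JitterRng;
    ///
    /// fn get_nstime() -> u64 {
    ///     use std::time::{SystemTime, UNIX_EPOCH};
    ///     let dur = SystemTime::now().duration_since(UNIX_EPOCH).unwrap();
    ///     dur.as_secs() << 30 | dur.subsec_nanos() as u64
    /// }
    ///
    /// let mut rng = JitterRng::new_with_timer(get_nstime);
    /// // Print a few deltas; a real statistical test would collect many
    /// // thousands and analyse them as described in the README.
    /// for _ in 0..10 {
    ///     println!("{}", rng.timer_stats(true));
    /// }
    /// ```
    ///
    /// [`new_with_timer`]: JitterRng::new_with_timer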
    pub fn timer_stats(&mut self, var_rounds: bool) -> i64 {
        let mut mem = [0; MEMORY_SIZE];

        let time = (self.timer)();
        self.memaccess(&mut mem, var_rounds);
        self.lfsr_time(time, var_rounds);
        let time2 = (self.timer)();
        time2.wrapping_sub(time) as i64
    }
}

// A function that is opaque to the optimizer to assist in avoiding dead-code
// elimination. Taken from `bencher`.
fn black_box<T>(dummy: T) -> T {
    unsafe {
        let ret = ptr::read_volatile(&dummy);
        mem::forget(dummy);
        ret
    }
}

impl RngCore for JitterRng {
    fn next_u32(&mut self) -> u32 {
        // We want to use both parts of the generated entropy
        if self.data_half_used {
            self.data_half_used = false;
            (self.data >> 32) as u32
        } else {
            self.data = self.next_u64();
            self.data_half_used = true;
            self.data as u32
        }
    }

    fn next_u64(&mut self) -> u64 {
        self.data_half_used = false;
        self.gen_entropy()
    }

    fn fill_bytes(&mut self, dest: &mut [u8]) {
        // Fill using `next_u64` via the helper from `rand_core::impls`.
        // Generating the entropy itself is orders of magnitude more expensive
        // than splitting it into bytes, so the filling strategy hardly matters
        // here.
        impls::fill_bytes_via_next(self, dest)
    }

    fn try_fill_bytes(&mut self, dest: &mut [u8]) -> Result<(), Error> {
        Ok(self.fill_bytes(dest))
    }
}

impl CryptoRng for JitterRng {}