// rand/rngs/adapter/reseeding.rs
1// Copyright 2018 Developers of the Rand project.
2// Copyright 2013 The Rust Project Developers.
3//
4// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
5// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
6// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
7// option. This file may not be copied, modified, or distributed
8// except according to those terms.
9
10//! A wrapper around another PRNG that reseeds it after it
11//! generates a certain number of random bytes.
12
13use core::mem::size_of;
14
15use rand_core::block::{BlockRng, BlockRngCore};
16use rand_core::{CryptoRng, Error, RngCore, SeedableRng};
17
18/// A wrapper around any PRNG that implements [`BlockRngCore`], that adds the
19/// ability to reseed it.
20///
21/// `ReseedingRng` reseeds the underlying PRNG in the following cases:
22///
23/// - On a manual call to [`reseed()`].
24/// - After `clone()`, the clone will be reseeded on first use.
25/// - After a process is forked, the RNG in the child process is reseeded within
26/// the next few generated values, depending on the block size of the
27/// underlying PRNG. For ChaCha and Hc128 this is a maximum of
28/// 15 `u32` values before reseeding.
29/// - After the PRNG has generated a configurable number of random bytes.
30///
31/// # When should reseeding after a fixed number of generated bytes be used?
32///
33/// Reseeding after a fixed number of generated bytes is never strictly
34/// *necessary*. Cryptographic PRNGs don't have a limited number of bytes they
35/// can output, or at least not a limit reachable in any practical way. There is
36/// no such thing as 'running out of entropy'.
37///
38/// Occasionally reseeding can be seen as some form of 'security in depth'. Even
39/// if in the future a cryptographic weakness is found in the CSPRNG being used,
40/// or a flaw in the implementation, occasionally reseeding should make
41/// exploiting it much more difficult or even impossible.
42///
43/// Use [`ReseedingRng::new`] with a `threshold` of `0` to disable reseeding
44/// after a fixed number of generated bytes.
45///
46/// # Error handling
47///
48/// Although unlikely, reseeding the wrapped PRNG can fail. `ReseedingRng` will
49/// never panic but try to handle the error intelligently through some
50/// combination of retrying and delaying reseeding until later.
51/// If handling the source error fails `ReseedingRng` will continue generating
52/// data from the wrapped PRNG without reseeding.
53///
54/// Manually calling [`reseed()`] will not have this retry or delay logic, but
55/// reports the error.
56///
57/// # Example
58///
59/// ```
60/// use rand::prelude::*;
61/// use rand_chacha::ChaCha20Core; // Internal part of ChaChaRng that
62/// // implements BlockRngCore
63/// use rand::rngs::OsRng;
64/// use rand::rngs::adapter::ReseedingRng;
65///
66/// let prng = ChaCha20Core::from_entropy();
67/// let mut reseeding_rng = ReseedingRng::new(prng, 0, OsRng);
68///
69/// println!("{}", reseeding_rng.gen::<u64>());
70///
71/// let mut cloned_rng = reseeding_rng.clone();
72/// assert!(reseeding_rng.gen::<u64>() != cloned_rng.gen::<u64>());
73/// ```
74///
75/// [`BlockRngCore`]: rand_core::block::BlockRngCore
76/// [`ReseedingRng::new`]: ReseedingRng::new
77/// [`reseed()`]: ReseedingRng::reseed
// Newtype over `BlockRng` so the buffered-block machinery from `rand_core`
// is reused unchanged; all reseeding logic lives in the private
// `ReseedingCore` wrapped inside.
#[derive(Debug)]
pub struct ReseedingRng<R, Rsdr>(BlockRng<ReseedingCore<R, Rsdr>>)
where
    R: BlockRngCore + SeedableRng,
    Rsdr: RngCore;
83
84impl<R, Rsdr> ReseedingRng<R, Rsdr>
85where
86 R: BlockRngCore + SeedableRng,
87 Rsdr: RngCore,
88{
89 /// Create a new `ReseedingRng` from an existing PRNG, combined with a RNG
90 /// to use as reseeder.
91 ///
92 /// `threshold` sets the number of generated bytes after which to reseed the
93 /// PRNG. Set it to zero to never reseed based on the number of generated
94 /// values.
95 pub fn new(rng: R, threshold: u64, reseeder: Rsdr) -> Self {
96 ReseedingRng(BlockRng::new(ReseedingCore::new(rng, threshold, reseeder)))
97 }
98
99 /// Reseed the internal PRNG.
100 pub fn reseed(&mut self) -> Result<(), Error> {
101 self.0.core.reseed()
102 }
103}
104
// TODO: this should be implemented for any type where the inner type
// implements RngCore, but we can't specify that because ReseedingCore is private
impl<R, Rsdr: RngCore> RngCore for ReseedingRng<R, Rsdr>
where
    R: BlockRngCore<Item = u32> + SeedableRng,
    <R as BlockRngCore>::Results: AsRef<[u32]> + AsMut<[u32]>,
{
    // All four methods delegate to the wrapped `BlockRng`, which serves
    // buffered values and calls `ReseedingCore::generate` when the buffer
    // is exhausted — that is where the reseed checks happen.
    #[inline(always)]
    fn next_u32(&mut self) -> u32 {
        self.0.next_u32()
    }

    #[inline(always)]
    fn next_u64(&mut self) -> u64 {
        self.0.next_u64()
    }

    fn fill_bytes(&mut self, dest: &mut [u8]) {
        self.0.fill_bytes(dest)
    }

    fn try_fill_bytes(&mut self, dest: &mut [u8]) -> Result<(), Error> {
        self.0.try_fill_bytes(dest)
    }
}
130
131impl<R, Rsdr> Clone for ReseedingRng<R, Rsdr>
132where
133 R: BlockRngCore + SeedableRng + Clone,
134 Rsdr: RngCore + Clone,
135{
136 fn clone(&self) -> ReseedingRng<R, Rsdr> {
137 // Recreating `BlockRng` seems easier than cloning it and resetting
138 // the index.
139 ReseedingRng(BlockRng::new(self.0.core.clone()))
140 }
141}
142
// `CryptoRng` is a pure marker trait (no methods): `ReseedingRng` qualifies
// whenever both the wrapped PRNG and the reseeder do.
impl<R, Rsdr> CryptoRng for ReseedingRng<R, Rsdr>
where
    R: BlockRngCore + SeedableRng + CryptoRng,
    Rsdr: RngCore + CryptoRng,
{
}
149
/// Private core shared by every `ReseedingRng`: the inner PRNG plus the
/// bookkeeping needed to decide when to reseed it.
#[derive(Debug)]
struct ReseedingCore<R, Rsdr> {
    /// The PRNG being wrapped and periodically reseeded.
    inner: R,
    /// Entropy source used to reseed `inner`.
    reseeder: Rsdr,
    /// Reseed after this many generated bytes; `i64::MAX` means "never"
    /// (see `ReseedingCore::new`, which maps a threshold of 0 to this).
    threshold: i64,
    /// Countdown of bytes until the next reseed; may go (at or below) zero,
    /// which `generate` treats as "reseed now".
    bytes_until_reseed: i64,
    /// Last observed value of the process-global fork counter.
    fork_counter: usize,
}
158
impl<R, Rsdr> BlockRngCore for ReseedingCore<R, Rsdr>
where
    R: BlockRngCore + SeedableRng,
    Rsdr: RngCore,
{
    type Item = <R as BlockRngCore>::Item;
    type Results = <R as BlockRngCore>::Results;

    fn generate(&mut self, results: &mut Self::Results) {
        let global_fork_counter = fork::get_fork_counter();
        // Slow path: the byte budget is exhausted, or a fork was detected.
        if self.bytes_until_reseed <= 0 || self.is_forked(global_fork_counter) {
            // We get better performance by not calling only `reseed` here
            // and continuing with the rest of the function, but by directly
            // returning from a non-inlined function.
            return self.reseed_and_generate(results, global_fork_counter);
        }
        // Hot path: charge this block's size against the budget and generate.
        let num_bytes = results.as_ref().len() * size_of::<Self::Item>();
        self.bytes_until_reseed -= num_bytes as i64;
        self.inner.generate(results);
    }
}
180
181impl<R, Rsdr> ReseedingCore<R, Rsdr>
182where
183 R: BlockRngCore + SeedableRng,
184 Rsdr: RngCore,
185{
186 /// Create a new `ReseedingCore`.
187 fn new(rng: R, threshold: u64, reseeder: Rsdr) -> Self {
188 use ::core::i64::MAX;
189 fork::register_fork_handler();
190
191 // Because generating more values than `i64::MAX` takes centuries on
192 // current hardware, we just clamp to that value.
193 // Also we set a threshold of 0, which indicates no limit, to that
194 // value.
195 let threshold = if threshold == 0 {
196 MAX
197 } else if threshold <= MAX as u64 {
198 threshold as i64
199 } else {
200 MAX
201 };
202
203 ReseedingCore {
204 inner: rng,
205 reseeder,
206 threshold: threshold as i64,
207 bytes_until_reseed: threshold as i64,
208 fork_counter: 0,
209 }
210 }
211
212 /// Reseed the internal PRNG.
213 fn reseed(&mut self) -> Result<(), Error> {
214 R::from_rng(&mut self.reseeder).map(|result| {
215 self.bytes_until_reseed = self.threshold;
216 self.inner = result
217 })
218 }
219
220 fn is_forked(&self, global_fork_counter: usize) -> bool {
221 // In theory, on 32-bit platforms, it is possible for
222 // `global_fork_counter` to wrap around after ~4e9 forks.
223 //
224 // This check will detect a fork in the normal case where
225 // `fork_counter < global_fork_counter`, and also when the difference
226 // between both is greater than `isize::MAX` (wrapped around).
227 //
228 // It will still fail to detect a fork if there have been more than
229 // `isize::MAX` forks, without any reseed in between. Seems unlikely
230 // enough.
231 (self.fork_counter.wrapping_sub(global_fork_counter) as isize) < 0
232 }
233
234 #[inline(never)]
235 fn reseed_and_generate(
236 &mut self, results: &mut <Self as BlockRngCore>::Results, global_fork_counter: usize,
237 ) {
238 #![allow(clippy::if_same_then_else)] // false positive
239 if self.is_forked(global_fork_counter) {
240 info!("Fork detected, reseeding RNG");
241 } else {
242 trace!("Reseeding RNG (periodic reseed)");
243 }
244
245 let num_bytes = results.as_ref().len() * size_of::<<R as BlockRngCore>::Item>();
246
247 if let Err(e) = self.reseed() {
248 warn!("Reseeding RNG failed: {}", e);
249 let _ = e;
250 }
251 self.fork_counter = global_fork_counter;
252
253 self.bytes_until_reseed = self.threshold - num_bytes as i64;
254 self.inner.generate(results);
255 }
256}
257
258impl<R, Rsdr> Clone for ReseedingCore<R, Rsdr>
259where
260 R: BlockRngCore + SeedableRng + Clone,
261 Rsdr: RngCore + Clone,
262{
263 fn clone(&self) -> ReseedingCore<R, Rsdr> {
264 ReseedingCore {
265 inner: self.inner.clone(),
266 reseeder: self.reseeder.clone(),
267 threshold: self.threshold,
268 bytes_until_reseed: 0, // reseed clone on first use
269 fork_counter: self.fork_counter,
270 }
271 }
272}
273
// Marker impl mirroring the one on `ReseedingRng`: the core is
// cryptographically secure whenever both the inner PRNG and the reseeder are.
impl<R, Rsdr> CryptoRng for ReseedingCore<R, Rsdr>
where
    R: BlockRngCore + SeedableRng + CryptoRng,
    Rsdr: RngCore + CryptoRng,
{
}
280
281
#[cfg(all(unix, not(target_os = "emscripten")))]
mod fork {
    use core::sync::atomic::{AtomicUsize, Ordering};
    use std::sync::Once;

    // Fork protection
    //
    // We implement fork protection on Unix using `pthread_atfork`.
    // When the process is forked, we increment `RESEEDING_RNG_FORK_COUNTER`.
    // Every `ReseedingRng` stores the last known value of the static in
    // `fork_counter`. If the cached `fork_counter` is less than
    // `RESEEDING_RNG_FORK_COUNTER`, it is time to reseed this RNG.
    //
    // NOTE(review): the original comment here claimed that a failed reseed
    // leaves `fork_counter` unchanged so a reseed is retried as soon as
    // possible, but `reseed_and_generate` updates `fork_counter`
    // unconditionally — confirm which behavior is intended.

    static RESEEDING_RNG_FORK_COUNTER: AtomicUsize = AtomicUsize::new(0);

    // Relaxed ordering is sufficient: we only need to eventually observe the
    // increment made in the child, not to order it against other memory ops.
    pub fn get_fork_counter() -> usize {
        RESEEDING_RNG_FORK_COUNTER.load(Ordering::Relaxed)
    }

    // Runs in the child process after every fork (registered below).
    extern "C" fn fork_handler() {
        // Note: fetch_add is defined to wrap on overflow
        // (which is what we want).
        RESEEDING_RNG_FORK_COUNTER.fetch_add(1, Ordering::Relaxed);
    }

    // Idempotent: `Once` guarantees a single registration per process even if
    // many `ReseedingCore`s are created.
    pub fn register_fork_handler() {
        static REGISTER: Once = Once::new();
        REGISTER.call_once(|| unsafe {
            // SAFETY: `pthread_atfork` is given a valid `extern "C" fn`
            // pointer for the child handler (and None for the others); the
            // handler only performs a single atomic increment.
            libc::pthread_atfork(None, None, Some(fork_handler));
        });
    }
}
318
// Fallback for platforms without `pthread_atfork` (non-Unix, or Emscripten):
// the counter stays 0 forever, so `is_forked` never triggers, and handler
// registration is a no-op.
#[cfg(not(all(unix, not(target_os = "emscripten"))))]
mod fork {
    pub fn get_fork_counter() -> usize {
        0
    }
    pub fn register_fork_handler() {}
}
326
327
#[cfg(feature = "std_rng")]
#[cfg(test)]
mod test {
    use super::ReseedingRng;
    use crate::rngs::mock::StepRng;
    use crate::rngs::std::Core;
    use crate::{Rng, SeedableRng};

    #[test]
    fn test_reseeding() {
        // `StepRng::new(0, 0)` is a constant mock source, so every reseed
        // should reproduce the same inner PRNG state and the output stream
        // should repeat after each reseed.
        let mut zero_src = StepRng::new(0, 0);
        let core = Core::from_rng(&mut zero_src).unwrap();
        let threshold = 1; // reseed every time the buffer is exhausted
        let mut reseeding = ReseedingRng::new(core, threshold, zero_src);

        // RNG buffer size is [u32; 64]
        // Debug is only implemented up to length 32 so use two arrays
        let mut output = ([0u32; 32], [0u32; 32]);
        reseeding.fill(&mut output.0);
        reseeding.fill(&mut output.1);
        let expected = output;
        for _ in 0..10 {
            reseeding.fill(&mut output.0);
            reseeding.fill(&mut output.1);
            assert_eq!(output, expected);
        }
    }

    #[test]
    fn test_clone_reseeding() {
        #![allow(clippy::redundant_clone)]

        let mut zero_src = StepRng::new(0, 0);
        let core = Core::from_rng(&mut zero_src).unwrap();
        let mut original = ReseedingRng::new(core, 32 * 4, zero_src);

        let first: u32 = original.gen();
        for _ in 0..10 {
            let _ = original.gen::<u32>();
        }

        // The clone reseeds on first use; with the constant mock reseeder
        // that recreates the state the original started from, so the clone's
        // first value matches the original's first value.
        let mut cloned = original.clone();
        assert_eq!(first, cloned.gen::<u32>());
    }
}
372}