use crate::convert::*;
use crate::operations::folded_multiply;
use crate::operations::read_small;
use crate::operations::MULTIPLE;
use crate::random_state::PI;
use crate::RandomState;
use core::hash::Hasher;

const ROT: u32 = 23; //17

/// A `Hasher` for hashing an arbitrary stream of bytes.
///
/// Instances of [`AHasher`] represent state that is updated while hashing data.
///
/// Each method updates the internal state based on the new data provided. Once
/// all of the data has been provided, the resulting hash can be obtained by calling
/// `finish()`.
///
/// [Clone] is also provided in case you wish to calculate hashes for two different items that
/// start with the same data.
///
#[derive(Debug, Clone)]
pub struct AHasher {
    buffer: u64,
    pad: u64,
    extra_keys: [u64; 2],
}
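
// A minimal usage sketch (test-only, an illustration rather than part of ahash's API) of the
// behavior described in the doc comment above: bytes are fed in with `write`, the hash is read
// out with `finish()`, and a `Clone` taken mid-stream reuses the state built from a shared prefix.
#[cfg(test)]
mod usage_sketch {
    use super::*;
    use core::hash::Hasher;

    #[test]
    fn write_then_finish() {
        let mut hasher = AHasher::new_with_keys(0, 0);
        hasher.write(b"hello ");
        // The clone shares the prefix state; the two streams then diverge.
        let mut forked = hasher.clone();
        hasher.write(b"world");
        forked.write(b"there");
        assert_ne!(hasher.finish(), forked.finish());
    }
}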

impl AHasher {
    /// Creates a new hasher keyed to the provided keys.
    #[inline]
    #[allow(dead_code)] // Is not called if non-fallback hash is used.
    pub(crate) fn new_with_keys(key1: u128, key2: u128) -> AHasher {
        let pi: [u128; 2] = PI.convert();
        let key1: [u64; 2] = (key1 ^ pi[0]).convert();
        let key2: [u64; 2] = (key2 ^ pi[1]).convert();
        AHasher {
            buffer: key1[0],
            pad: key1[1],
            extra_keys: key2,
        }
    }

    #[allow(unused)] // False positive
    pub(crate) fn test_with_keys(key1: u128, key2: u128) -> Self {
        let key1: [u64; 2] = key1.convert();
        let key2: [u64; 2] = key2.convert();
        Self {
            buffer: key1[0],
            pad: key1[1],
            extra_keys: key2,
        }
    }

    #[inline]
    #[allow(dead_code)] // Is not called if non-fallback hash is used.
    pub(crate) fn from_random_state(rand_state: &RandomState) -> AHasher {
        AHasher {
            buffer: rand_state.k1,
            pad: rand_state.k0,
            extra_keys: [rand_state.k2, rand_state.k3],
        }
    }

    /// This update function has the goal of updating the buffer with a single multiply.
    /// FxHash does this but is vulnerable to attack. To avoid this, the input needs to be masked with an
    /// unpredictable value. Other hashes such as MurmurHash have taken this approach but were found vulnerable
    /// to attack. The attack was based on the idea of reversing the pre-mixing (which is necessarily
    /// reversible, otherwise bits would be lost) then placing a difference in the highest bit before the
    /// multiply used to mix the data. Because a multiply can never affect the bits to the right of it, a
    /// subsequent update that also differed in this bit could result in a predictable collision.
    ///
    /// This version avoids this vulnerability while still only using a single multiply. It takes advantage
    /// of the fact that when a 64 bit multiply is performed the upper 64 bits are usually computed and thrown
    /// away. Instead, it creates two 128 bit values where the upper 64 bits are zeros and multiplies them.
    /// (The compiler is smart enough to turn this into a 64 bit multiplication in the assembly.)
    /// Then the upper bits are xored with the lower bits to produce a single 64 bit result.
    ///
    /// To understand why this is a good scrambling function it helps to understand multiply-with-carry PRNGs:
    /// https://en.wikipedia.org/wiki/Multiply-with-carry_pseudorandom_number_generator
    /// If the multiple is chosen well, this creates a long-period, decent-quality PRNG.
    /// Notice that this function is equivalent to such a PRNG except that the `buffer`/`state` is xored with
    /// each new block of data. In the event that the data is all zeros, it is exactly equivalent to a MWC PRNG.
    ///
    /// This is impervious to attack because every bit of the buffer at the end is dependent on every bit in
    /// `new_data ^ buffer`. For example, suppose two inputs differed in only the 5th bit. Then when the
    /// multiplication is performed the `result` will differ in bits 5-69. More specifically it will differ by
    /// 2^5 * MULTIPLE. However in the next step bits 65-128 are turned into a separate 64 bit value. So the
    /// differing bits will be in the lower 6 bits of this value. The two intermediate values, which differ in
    /// bits 5-63 and in bits 0-5 respectively, get xored together, producing an output that differs in every
    /// bit. The carries in the multiplication additionally mean that even if an attacker somehow knew part
    /// of (but not all of) the contents of the buffer beforehand, they would not be able to predict any of
    /// the bits in the buffer at the end. (A test-only sketch of this diffusion appears after this `impl` block.)
    #[inline(always)]
    fn update(&mut self, new_data: u64) {
        self.buffer = folded_multiply(new_data ^ self.buffer, MULTIPLE);
    }

    /// Similar to the above, this function performs an update using a "folded multiply".
    /// However, it takes in 128 bits of data instead of 64. Both halves must be masked.
    ///
    /// This makes it impossible for an attacker to place a single bit difference between
    /// two blocks so as to cancel each other out.
    ///
    /// However, this alone is not sufficient: to prevent (a, b) from hashing the same as (b, a), the buffer
    /// itself must be updated between calls in a way that does not commute. To achieve this, add, xor and
    /// rotate are used. Add followed by xor is not the same as xor followed by add, and rotate ensures that
    /// the same output bits can't be changed by the same set of input bits. To cancel this sequence with
    /// subsequent input would require knowing the keys. (A test-only sketch of the ordering property
    /// appears after this `impl` block.)
    #[inline(always)]
    fn large_update(&mut self, new_data: u128) {
        let block: [u64; 2] = new_data.convert();
        let combined = folded_multiply(block[0] ^ self.extra_keys[0], block[1] ^ self.extra_keys[1]);
        self.buffer = (self.buffer.wrapping_add(self.pad) ^ combined).rotate_left(ROT);
    }

    #[inline]
    #[cfg(feature = "specialize")]
    fn short_finish(&self) -> u64 {
        folded_multiply(self.buffer, self.pad)
    }
}
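
// A test-only sketch (an illustration, not part of ahash's API) of the diffusion property
// described in `update`'s doc comment above: two inputs that differ in a single bit leave
// the buffer differing after one folded multiply.
#[cfg(test)]
mod update_diffusion_sketch {
    use super::*;

    #[test]
    fn single_bit_difference_spreads() {
        let mut a = AHasher::new_with_keys(0, 0);
        let mut b = AHasher::new_with_keys(0, 0);
        a.update(0);
        b.update(1 << 5); // differs from `a`'s input only in bit 5
        // The products differ by 2^5 * MULTIPLE, and folding the two halves of the
        // product together spreads that difference across the 64-bit buffer.
        assert_ne!(a.buffer, b.buffer);
    }
}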
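
// A test-only sketch of the ordering property described in `large_update`'s doc comment
// above: because the add/xor/rotate sequence does not commute, feeding the same two
// blocks in swapped order must not leave the buffer in the same state.
#[cfg(test)]
mod large_update_order_sketch {
    use super::*;

    #[test]
    fn swapped_blocks_differ() {
        let (a, b) = (1u128, 2u128);
        let mut x = AHasher::new_with_keys(0, 0);
        let mut y = AHasher::new_with_keys(0, 0);
        x.large_update(a);
        x.large_update(b);
        y.large_update(b);
        y.large_update(a);
        assert_ne!(x.buffer, y.buffer, "(a, b) and (b, a) should not collide");
    }
}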

/// Provides [Hasher] methods to hash all of the primitive types.
///
/// [Hasher]: core::hash::Hasher
impl Hasher for AHasher {
    #[inline]
    fn write_u8(&mut self, i: u8) {
        self.update(i as u64);
    }

    #[inline]
    fn write_u16(&mut self, i: u16) {
        self.update(i as u64);
    }

    #[inline]
    fn write_u32(&mut self, i: u32) {
        self.update(i as u64);
    }

    #[inline]
    fn write_u64(&mut self, i: u64) {
        self.update(i);
    }

    #[inline]
    fn write_u128(&mut self, i: u128) {
        self.large_update(i);
    }

    #[inline]
    #[cfg(any(
        target_pointer_width = "64",
        target_pointer_width = "32",
        target_pointer_width = "16"
    ))]
    fn write_usize(&mut self, i: usize) {
        self.write_u64(i as u64);
    }

    #[inline]
    #[cfg(target_pointer_width = "128")]
    fn write_usize(&mut self, i: usize) {
        self.write_u128(i as u128);
    }

    #[inline]
    #[allow(clippy::collapsible_if)]
    fn write(&mut self, input: &[u8]) {
        let mut data = input;
        let length = data.len() as u64;
        // Needs to be an add rather than an xor because otherwise it could be canceled with carefully formed input.
        // (A test-only sketch of this length mixing appears after this impl block.)
        self.buffer = self.buffer.wrapping_add(length).wrapping_mul(MULTIPLE);
        // A 'binary search' on sizes reduces the number of comparisons.
        if data.len() > 8 {
            if data.len() > 16 {
                // Consume the (possibly overlapping) tail first, then whole 16-byte blocks.
                let tail = data.read_last_u128();
                self.large_update(tail);
                while data.len() > 16 {
                    let (block, rest) = data.read_u128();
                    self.large_update(block);
                    data = rest;
                }
            } else {
                // 9-16 bytes: the first and last 8 bytes, which overlap when len < 16.
                self.large_update([data.read_u64().0, data.read_last_u64()].convert());
            }
        } else {
            // 0-8 bytes: read_small expands the input into two words using overlapping reads.
            let value = read_small(data);
            self.large_update(value.convert());
        }
    }

    #[inline]
    fn finish(&self) -> u64 {
        // Rotate by a data-dependent amount, so the final mix also depends on the buffer itself.
        let rot = (self.buffer & 63) as u32;
        folded_multiply(self.buffer, self.pad).rotate_left(rot)
    }
}
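
// A test-only sketch of the length mixing done at the top of `write` above: two all-zero
// inputs of different short lengths expand to the same padded words in `read_small`, so
// only the length added into the buffer separates their hashes.
#[cfg(test)]
mod length_mixing_sketch {
    use super::*;
    use core::hash::Hasher;

    #[test]
    fn zero_inputs_differ_by_length() {
        let mut one = AHasher::new_with_keys(0, 0);
        let mut two = AHasher::new_with_keys(0, 0);
        one.write(&[0u8; 1]);
        two.write(&[0u8; 2]);
        assert_ne!(one.finish(), two.finish());
    }
}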

#[cfg(feature = "specialize")]
pub(crate) struct AHasherU64 {
    pub(crate) buffer: u64,
    pub(crate) pad: u64,
}

/// A specialized hasher for only primitives under 64 bits.
#[cfg(feature = "specialize")]
impl Hasher for AHasherU64 {
    #[inline]
    fn finish(&self) -> u64 {
        folded_multiply(self.buffer, self.pad)
    }

    #[inline]
    fn write(&mut self, _bytes: &[u8]) {
        unreachable!("Specialized hasher was called with a different type of object")
    }

    #[inline]
    fn write_u8(&mut self, i: u8) {
        self.write_u64(i as u64);
    }

    #[inline]
    fn write_u16(&mut self, i: u16) {
        self.write_u64(i as u64);
    }

    #[inline]
    fn write_u32(&mut self, i: u32) {
        self.write_u64(i as u64);
    }

    #[inline]
    fn write_u64(&mut self, i: u64) {
        self.buffer = folded_multiply(i ^ self.buffer, MULTIPLE);
    }

    #[inline]
    fn write_u128(&mut self, _i: u128) {
        unreachable!("Specialized hasher was called with a different type of object")
    }

    #[inline]
    fn write_usize(&mut self, _i: usize) {
        unreachable!("Specialized hasher was called with a different type of object")
    }
}

#[cfg(feature = "specialize")]
pub(crate) struct AHasherFixed(pub AHasher);

/// A specialized hasher for fixed size primitives larger than 64 bits.
#[cfg(feature = "specialize")]
impl Hasher for AHasherFixed {
    #[inline]
    fn finish(&self) -> u64 {
        self.0.short_finish()
    }

    #[inline]
    fn write(&mut self, bytes: &[u8]) {
        self.0.write(bytes)
    }

    #[inline]
    fn write_u8(&mut self, i: u8) {
        self.write_u64(i as u64);
    }

    #[inline]
    fn write_u16(&mut self, i: u16) {
        self.write_u64(i as u64);
    }

    #[inline]
    fn write_u32(&mut self, i: u32) {
        self.write_u64(i as u64);
    }

    #[inline]
    fn write_u64(&mut self, i: u64) {
        self.0.write_u64(i);
    }

    #[inline]
    fn write_u128(&mut self, i: u128) {
        self.0.write_u128(i);
    }

    #[inline]
    fn write_usize(&mut self, i: usize) {
        self.0.write_usize(i);
    }
}

#[cfg(feature = "specialize")]
pub(crate) struct AHasherStr(pub AHasher);

/// A specialized hasher for a single string.
/// Note that the integer `write_*` methods below are no-ops rather than panics, because the
/// `Hash` impl for `String` tacks on an unneeded call (as does `Vec`).
#[cfg(feature = "specialize")]
impl Hasher for AHasherStr {
    #[inline]
    fn finish(&self) -> u64 {
        self.0.finish()
    }

    #[inline]
    fn write(&mut self, bytes: &[u8]) {
        if bytes.len() > 8 {
            self.0.write(bytes)
        } else {
            // Short strings: fold both halves of the expanded input with the keys in a
            // single multiply, and mix the length into the pad.
            let value = read_small(bytes);
            self.0.buffer = folded_multiply(value[0] ^ self.0.buffer, value[1] ^ self.0.extra_keys[1]);
            self.0.pad = self.0.pad.wrapping_add(bytes.len() as u64);
        }
    }

    #[inline]
    fn write_u8(&mut self, _i: u8) {}

    #[inline]
    fn write_u16(&mut self, _i: u16) {}

    #[inline]
    fn write_u32(&mut self, _i: u32) {}

    #[inline]
    fn write_u64(&mut self, _i: u64) {}

    #[inline]
    fn write_u128(&mut self, _i: u128) {}

    #[inline]
    fn write_usize(&mut self, _i: usize) {}
}

#[cfg(test)]
mod tests {
    use crate::fallback_hash::*;

    #[test]
    fn test_hash() {
        let mut hasher = AHasher::new_with_keys(0, 0);
        let value: u64 = 1 << 32;
        hasher.update(value);
        let result = hasher.buffer;
        let mut hasher = AHasher::new_with_keys(0, 0);
        let value2: u64 = 1;
        hasher.update(value2);
        let result2 = hasher.buffer;
        let result: [u8; 8] = result.convert();
        let result2: [u8; 8] = result2.convert();
        assert_ne!(hex::encode(result), hex::encode(result2));
    }

    #[test]
    fn test_conversion() {
        let input: &[u8] = "dddddddd".as_bytes();
        let bytes: u64 = as_array!(input, 8).convert();
        assert_eq!(bytes, 0x6464646464646464);
    }
}