fsverity_merkle/
util.rs

1// Copyright 2023 The Fuchsia Authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
5use crate::{SHA256_SALT_PADDING, SHA512_SALT_PADDING};
6use anyhow::{Error, anyhow, ensure};
7use fidl_fuchsia_io as fio;
8use mundane::hash::{Digest, Hasher, Sha256, Sha512};
9use std::fmt;
10use zerocopy::{FromBytes, Immutable, IntoBytes, KnownLayout};
11
/// `FsVerityHasherOptions` contains relevant metadata for the FsVerityHasher. The `salt` is set
/// according to the FsverityMetadata struct stored in fxfs and `block_size` is that of the
/// filesystem.
#[derive(Clone)]
pub struct FsVerityHasherOptions {
    // Salt fed to the hasher before each block's data (zero-padded when `fsverity` is true).
    salt: Vec<u8>,
    // Size in bytes of one merkle tree block; matches the filesystem block size.
    block_size: usize,
    // True for fsverity semantics (salt zero-padded to the hash-specific boundary before
    // hashing); false for dm-verity, which hashes the salt unpadded.
    fsverity: bool,
}
21
22impl FsVerityHasherOptions {
23    pub fn new(salt: Vec<u8>, block_size: usize) -> Self {
24        FsVerityHasherOptions { salt, block_size, fsverity: true }
25    }
26
27    pub fn new_dmverity(salt: Vec<u8>, block_size: usize) -> Self {
28        FsVerityHasherOptions { salt, block_size, fsverity: false }
29    }
30}
31
32/// The raw structure of an FsVerity descriptor. The values in this are not necessarily valid.
33#[derive(Debug, KnownLayout, FromBytes, Immutable, IntoBytes)]
34#[repr(C, packed)]
35pub struct FsVerityDescriptorRaw {
36    version: u8,
37    algorithm: u8,
38    block_size_log2: u8,
39    salt_size: u8,
40    _reserved_1: [u8; 4],
41    file_size: [u8; 8],
42    root_digest: [u8; 64],
43    salt: [u8; 32],
44    _reserved_2: [u8; 144],
45}
46
47impl FsVerityDescriptorRaw {
48    pub fn new(
49        algorithm: fio::HashAlgorithm,
50        block_size: u64,
51        file_size: u64,
52        root: &[u8],
53        salt: &[u8],
54    ) -> Result<Self, Error> {
55        ensure!(block_size.is_power_of_two() && block_size >= 1024, "Invalid merkle block size");
56        ensure!(salt.len() <= 32, "Salt too long");
57        let (hash_len, algorithm) = match algorithm {
58            fio::HashAlgorithm::Sha256 => (<Sha256 as Hasher>::Digest::DIGEST_LEN, 1),
59            fio::HashAlgorithm::Sha512 => (<Sha512 as Hasher>::Digest::DIGEST_LEN, 2),
60            _ => return Err(anyhow!("Unknown hash type")),
61        };
62        ensure!(root.len() == hash_len, "Wrong length of root digest");
63
64        let mut this = Self {
65            version: 1,
66            algorithm,
67            block_size_log2: block_size.trailing_zeros() as u8,
68            salt_size: salt.len() as u8,
69            _reserved_1: [0u8; 4],
70            file_size: file_size.to_le_bytes(),
71            root_digest: [0u8; 64],
72            salt: [0u8; 32],
73            _reserved_2: [0u8; 144],
74        };
75        this.root_digest.as_mut_slice()[0..hash_len].copy_from_slice(root);
76        this.salt.as_mut_slice()[0..salt.len()].copy_from_slice(salt);
77        Ok(this)
78    }
79
80    pub fn write_to_slice(&self, dest: &mut [u8]) -> Result<(), Error> {
81        self.write_to_prefix(dest).map_err(|_| anyhow!("Buffer too short"))
82    }
83}
84
/// A descriptor struct for fsverity. It does not own the bytes backing it.
#[derive(Debug)]
pub struct FsVerityDescriptor<'a> {
    // Parsed view of the raw descriptor located in the last block of `bytes`.
    inner: &'a FsVerityDescriptorRaw,
    // The entire block-aligned fsverity metadata region (leaf digest layer + descriptor).
    bytes: &'a [u8],
}
91
92impl<'a> FsVerityDescriptor<'a> {
93    /// Create a descriptor from the raw bytes of the entire block-aligned fsverity data.
94    pub fn from_bytes(bytes: &'a [u8], block_size: usize) -> Result<Self, Error> {
95        ensure!(block_size.is_power_of_two() && block_size > 0, "Invalid block size.");
96        // Descriptor is placed in the last block. Go to the start of the last block.
97        let descriptor_offset = if bytes.len() == 0 {
98            // This will fail properly below.
99            0
100        } else {
101            ((bytes.len() - 1) / block_size) * block_size
102        };
103        let inner = FsVerityDescriptorRaw::ref_from_prefix(&bytes[descriptor_offset..])
104            .map_err(|_| anyhow!("Descriptor bytes too small"))?
105            .0;
106
107        ensure!(inner.version == 1, "Unsupported version {}", inner.version);
108
109        ensure!(
110            inner.algorithm == 1 || inner.algorithm == 2,
111            "Unsupported algorithm {}",
112            inner.algorithm
113        );
114
115        // Merkle block size here doesn't necessarily need to match fs block size, but it is the
116        // most efficient choice, greatly simplifies handling, and is the only supported choice in
117        // the destination fxfs. It it stored in the descriptor as the log_2 of the value. It must
118        // be at least 1024 and also no more than system page size. We won't verify page size here
119        // but also won't support more than 64KiB.
120        ensure!(
121            inner.block_size_log2 >= 10 && inner.block_size_log2 <= 16,
122            "Only supports 1KiB-64KiB"
123        );
124
125        ensure!(inner.salt_size <= 32, "Salt too big for struct");
126        let this = Self { inner, bytes };
127        ensure!(this.block_size() == block_size, "Only support same block size as file system");
128        Ok(this)
129    }
130
131    pub fn digest_len(&self) -> usize {
132        match self.inner.algorithm {
133            1 => <Sha256 as Hasher>::Digest::DIGEST_LEN,
134            2 => <Sha512 as Hasher>::Digest::DIGEST_LEN,
135            _ => unreachable!("This should be verified at creation time."),
136        }
137    }
138
139    pub fn digest_algorithm(&self) -> fio::HashAlgorithm {
140        match self.inner.algorithm {
141            1 => fio::HashAlgorithm::Sha256,
142            2 => fio::HashAlgorithm::Sha512,
143            _ => unreachable!("This should be verified at creation time."),
144        }
145    }
146
147    pub fn block_size(&self) -> usize {
148        1usize << self.inner.block_size_log2
149    }
150
151    pub fn file_size(&self) -> usize {
152        u64::from_le_bytes(self.inner.file_size) as usize
153    }
154
155    pub fn root_digest(&self) -> &'a [u8] {
156        &self.inner.root_digest[..self.digest_len()]
157    }
158
159    pub fn salt(&self) -> &'a [u8] {
160        &self.inner.salt[..self.inner.salt_size as usize]
161    }
162
163    /// Return a hasher configured based on this descriptor.
164    pub fn hasher(&self) -> FsVerityHasher {
165        match self.inner.algorithm {
166            1 => FsVerityHasher::Sha256(FsVerityHasherOptions::new(
167                self.salt().to_vec(),
168                self.block_size(),
169            )),
170            2 => FsVerityHasher::Sha512(FsVerityHasherOptions::new(
171                self.salt().to_vec(),
172                self.block_size(),
173            )),
174            _ => unreachable!("This should be verified at creation time."),
175        }
176    }
177
178    /// A slice of all the leaf digests required for the file.
179    pub fn leaf_digests(&self) -> Result<&'a [u8], Error> {
180        let block_size = self.block_size();
181        Ok(match self.file_size().div_ceil(block_size) {
182            0 => [0u8; 0].as_slice(),
183            1 => self.root_digest(),
184            file_blocks => {
185                let leaf_size = file_blocks * self.digest_len();
186                let layer_size = leaf_size.next_multiple_of(block_size);
187                let descriptor_offset = ((self.bytes.len() - 1) / block_size) * block_size;
188                ensure!(descriptor_offset >= layer_size, "No space for leaves in descriptor");
189                let leaf_offset = descriptor_offset - layer_size;
190                &self.bytes[leaf_offset..(leaf_offset + leaf_size)]
191            }
192        })
193    }
194}
195
/// `FsVerityHasher` is used by fsverity to construct merkle trees for verity-enabled files.
/// `FsVerityHasher` is parameterized by a salt and a block size.
#[derive(Clone)]
pub enum FsVerityHasher {
    /// Produces SHA-256 digests.
    Sha256(FsVerityHasherOptions),
    /// Produces SHA-512 digests.
    Sha512(FsVerityHasherOptions),
}
203
204impl fmt::Debug for FsVerityHasher {
205    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
206        match self {
207            FsVerityHasher::Sha256(metadata) => f
208                .debug_struct("FsVerityHasher::Sha256")
209                .field("salt", &metadata.salt)
210                .field("block_size", &metadata.block_size)
211                .finish(),
212            FsVerityHasher::Sha512(metadata) => f
213                .debug_struct("FsVerityHasher::Sha512")
214                .field("salt", &metadata.salt)
215                .field("block_size", &metadata.block_size)
216                .finish(),
217        }
218    }
219}
220
221impl FsVerityHasher {
222    pub fn block_size(&self) -> usize {
223        match self {
224            FsVerityHasher::Sha256(metadata) => metadata.block_size,
225            FsVerityHasher::Sha512(metadata) => metadata.block_size,
226        }
227    }
228
229    pub fn hash_size(&self) -> usize {
230        match self {
231            FsVerityHasher::Sha256(_) => <Sha256 as Hasher>::Digest::DIGEST_LEN,
232            FsVerityHasher::Sha512(_) => <Sha512 as Hasher>::Digest::DIGEST_LEN,
233        }
234    }
235
236    pub fn fsverity(&self) -> bool {
237        match &self {
238            FsVerityHasher::Sha256(metadata) => metadata.fsverity,
239            FsVerityHasher::Sha512(metadata) => metadata.fsverity,
240        }
241    }
242
243    /// Computes the MerkleTree digest from a `block` of data.
244    ///
245    /// A MerkleTree digest is a hash of a block of data. The block will be zero filled if its
246    /// len is less than the block_size, except for when the first data block is completely empty.
247    /// If `salt.len() > 0`, we prepend the block with the salt which itself is zero filled up
248    /// to the padding.
249    ///
250    /// # Panics
251    ///
252    /// Panics if `block.len()` exceeds `self.block_size()`.
253    pub fn hash_block(&self, block: &[u8]) -> Vec<u8> {
254        match self {
255            FsVerityHasher::Sha256(metadata) => {
256                if block.is_empty() {
257                    // Empty files have a root hash of all zeroes.
258                    return vec![0; <Sha256 as Hasher>::Digest::DIGEST_LEN];
259                }
260                assert!(block.len() <= metadata.block_size);
261                let mut hasher = Sha256::default();
262                let salt_size = metadata.salt.len() as u8;
263
264                if salt_size > 0 {
265                    hasher.update(&metadata.salt);
266                    if metadata.fsverity && salt_size % SHA256_SALT_PADDING != 0 {
267                        hasher.update(&vec![
268                            0;
269                            (SHA256_SALT_PADDING - salt_size % SHA256_SALT_PADDING)
270                                as usize
271                        ])
272                    }
273                }
274
275                hasher.update(block);
276                // Zero fill block up to self.block_size(). As a special case, if the first data
277                // block is completely empty, it is not zero filled.
278                if block.len() != metadata.block_size {
279                    hasher.update(&vec![0; metadata.block_size - block.len()]);
280                }
281                hasher.finish().bytes().to_vec()
282            }
283            FsVerityHasher::Sha512(metadata) => {
284                if block.is_empty() {
285                    // Empty files have a root hash of all zeroes.
286                    return vec![0; <Sha512 as Hasher>::Digest::DIGEST_LEN];
287                }
288                assert!(block.len() <= metadata.block_size);
289                let mut hasher = Sha512::default();
290                let salt_size = metadata.salt.len() as u8;
291
292                if salt_size > 0 {
293                    hasher.update(&metadata.salt);
294                    if metadata.fsverity && salt_size % SHA512_SALT_PADDING != 0 {
295                        hasher.update(&vec![
296                            0;
297                            (SHA512_SALT_PADDING - salt_size % SHA512_SALT_PADDING)
298                                as usize
299                        ])
300                    }
301                }
302
303                hasher.update(block);
304                // Zero fill block up to self.block_size(). As a special case, if the first data
305                // block is completely empty, it is not zero filled.
306                if block.len() != metadata.block_size {
307                    hasher.update(&vec![0; metadata.block_size - block.len()]);
308                }
309                hasher.finish().bytes().to_vec()
310            }
311        }
312    }
313
314    /// Computes a MerkleTree digest from a block of `hashes`.
315    ///
316    /// Like `hash_block`, `hash_hashes` zero fills incomplete buffers and prepends the digests
317    /// with a salt, which is zero filled up to the padding.
318    ///
319    /// # Panics
320    ///
321    /// Panics if any of the following conditions are met:
322    /// - `hashes.len()` is 0
323    /// - `hashes.len() > self.block_size() / digest length`
324    pub fn hash_hashes(&self, hashes: &[Vec<u8>]) -> Vec<u8> {
325        assert_ne!(hashes.len(), 0);
326        match self {
327            FsVerityHasher::Sha256(metadata) => {
328                assert!(
329                    hashes.len() <= (metadata.block_size / <Sha256 as Hasher>::Digest::DIGEST_LEN)
330                );
331                let mut hasher = Sha256::default();
332                let salt_size = metadata.salt.len() as u8;
333                if salt_size > 0 {
334                    hasher.update(&metadata.salt);
335                    if metadata.fsverity && salt_size % SHA256_SALT_PADDING != 0 {
336                        hasher.update(&vec![
337                            0;
338                            (SHA256_SALT_PADDING - salt_size % SHA256_SALT_PADDING)
339                                as usize
340                        ])
341                    }
342                }
343
344                for hash in hashes {
345                    hasher.update(hash.as_slice());
346                }
347                for _ in 0..((metadata.block_size / <Sha256 as Hasher>::Digest::DIGEST_LEN)
348                    - hashes.len())
349                {
350                    hasher.update(&[0; <Sha256 as Hasher>::Digest::DIGEST_LEN]);
351                }
352
353                hasher.finish().bytes().to_vec()
354            }
355            FsVerityHasher::Sha512(metadata) => {
356                assert!(
357                    hashes.len() <= (metadata.block_size / <Sha512 as Hasher>::Digest::DIGEST_LEN)
358                );
359
360                let mut hasher = Sha512::default();
361                let salt_size = metadata.salt.len() as u8;
362                if salt_size > 0 {
363                    hasher.update(&metadata.salt);
364                    if metadata.fsverity && salt_size % SHA512_SALT_PADDING != 0 {
365                        hasher.update(&vec![
366                            0;
367                            (SHA512_SALT_PADDING - salt_size % SHA512_SALT_PADDING)
368                                as usize
369                        ])
370                    }
371                }
372
373                for hash in hashes {
374                    hasher.update(hash.as_slice());
375                }
376                for _ in 0..((metadata.block_size / <Sha512 as Hasher>::Digest::DIGEST_LEN)
377                    - hashes.len())
378                {
379                    hasher.update(&[0; <Sha512 as Hasher>::Digest::DIGEST_LEN]);
380                }
381
382                hasher.finish().bytes().to_vec()
383            }
384        }
385    }
386}
387
388#[cfg(test)]
389mod tests {
390    use super::*;
391    use crate::MerkleTreeBuilder;
392    use fidl_fuchsia_io as fio;
393    use hex::FromHex;
394    use test_case::test_case;
395
    // Block size used for both the filesystem and the merkle tree throughout these tests.
    const BLOCK_SIZE: usize = 4096;
397
398    #[test]
399    fn test_hash_block_empty_sha256() {
400        let hasher = FsVerityHasher::Sha256(FsVerityHasherOptions::new(vec![0xFF; 8], 4096));
401        let block = [];
402        let hash = hasher.hash_block(&block[..]);
403        assert_eq!(hash, [0; 32]);
404    }
405
406    #[test]
407    fn test_hash_block_empty_sha512() {
408        let hasher = FsVerityHasher::Sha512(FsVerityHasherOptions::new(vec![0xFF; 8], 4096));
409        let block = [];
410        let hash = hasher.hash_block(&block[..]);
411        assert_eq!(hash, [0; 64]);
412    }
413
414    #[test]
415    fn test_hash_block_partial_block_sha256() {
416        let hasher = FsVerityHasher::Sha256(FsVerityHasherOptions::new(vec![0xFF; 8], 4096));
417        let block = vec![0xFF; hasher.block_size()];
418        let mut block2: Vec<u8> = vec![0xFF; hasher.block_size() / 2];
419        block2.append(&mut vec![0; hasher.block_size() / 2]);
420        let hash = hasher.hash_block(&block[..]);
421        let expected = hasher.hash_block(&block[..]);
422        assert_eq!(hash, expected);
423    }
424
425    #[test]
426    fn test_hash_block_partial_block_sha512() {
427        let hasher = FsVerityHasher::Sha512(FsVerityHasherOptions::new(vec![0xFF; 8], 4096));
428        let block = vec![0xFF; hasher.block_size()];
429        let mut block2: Vec<u8> = vec![0xFF; hasher.block_size() / 2];
430        block2.append(&mut vec![0; hasher.block_size() / 2]);
431        let hash = hasher.hash_block(&block[..]);
432        let expected = hasher.hash_block(&block[..]);
433        assert_eq!(hash, expected);
434    }
435
436    #[test]
437    fn test_hash_block_single_sha256() {
438        let hasher = FsVerityHasher::Sha256(FsVerityHasherOptions::new(vec![0xFF; 8], 4096));
439        let block = vec![0xFF; hasher.block_size()];
440        let hash = hasher.hash_block(&block[..]);
441        // Root hash of file size 4096 = block_size
442        let expected: [u8; 32] =
443            FromHex::from_hex("207f18729b037894447f948b81f63abe68007d0cd7c99a4ae0a3e323c52013a5")
444                .unwrap();
445        assert_eq!(hash, expected);
446    }
447
448    #[test]
449    fn test_hash_block_single_sha512() {
450        let hasher = FsVerityHasher::Sha512(FsVerityHasherOptions::new(vec![0xFF; 8], 4096));
451        let block = vec![0xFF; hasher.block_size()];
452        let hash = hasher.hash_block(&block[..]);
453        // Root hash of file size 4096 = block_size
454        let expected: [u8; 64] = FromHex::from_hex("96d217a5f593384eb266b4bb2574b93c145ff1fd5ca89af52af6d4a14d2ce5200b2ddad30771c7cbcd139688e1a3847da7fd681490690adc945c3776154c42f6").unwrap();
455        assert_eq!(hash, expected);
456    }
457
458    #[test]
459    fn test_hash_hashes_full_block_sha256() {
460        let hasher = FsVerityHasher::Sha256(FsVerityHasherOptions::new(vec![0xFF; 8], 4096));
461        let mut leafs = Vec::new();
462        {
463            let block = vec![0xFF; hasher.block_size()];
464            for _i in 0..hasher.block_size() / hasher.hash_size() {
465                leafs.push(hasher.hash_block(&block));
466            }
467        }
468        let root = hasher.hash_hashes(&leafs);
469        // Root hash of file size 524288 = block_size * (block_size / hash_size) = 4096 * (4096 / 32)
470        let expected: [u8; 32] =
471            FromHex::from_hex("827c28168aba953cf74706d4f3e776bd8892f6edf7b25d89645409f24108fb0b")
472                .unwrap();
473        assert_eq!(root, expected);
474    }
475
476    #[test]
477    fn test_hash_hashes_full_block_sha512() {
478        let hasher = FsVerityHasher::Sha512(FsVerityHasherOptions::new(vec![0xFF; 8], 4096));
479        let mut leafs = Vec::new();
480        {
481            let block = vec![0xFF; hasher.block_size()];
482            for _i in 0..hasher.block_size() / hasher.hash_size() {
483                leafs.push(hasher.hash_block(&block));
484            }
485        }
486        let root = hasher.hash_hashes(&leafs);
487        // Root hash of file size 262144 = block_size * (block_size / hash_size) = 4096 * (4096 / 64)
488        let expected: [u8; 64] = FromHex::from_hex("17d1728518330e0d48951ba43908ea7ad73ea018597643aabba9af2e43dea70468ba54fa09f9c7d02b1c240bd8009d1abd49c05559815a3b73ce31c5c26f93ba").unwrap();
489        assert_eq!(root, expected);
490    }
491
492    #[test_case(FsVerityHasher::Sha256(FsVerityHasherOptions::new(vec![0xFF; 8], 4096)); "sha256")]
493    #[test_case(FsVerityHasher::Sha512(FsVerityHasherOptions::new(vec![0xFF; 8], 4096)); "sha512")]
494    fn test_hash_hashes_zero_pad_same_length(hasher: FsVerityHasher) {
495        let data_hash = hasher.hash_block(&vec![0xFF; hasher.block_size()]);
496        let mut zero_hash = Vec::with_capacity(hasher.hash_size());
497        zero_hash.extend(std::iter::repeat(0).take(hasher.hash_size()));
498        let hash_of_single_hash = hasher.hash_hashes(&[data_hash.clone()]);
499        let hash_of_single_hash_and_zero_hash = hasher.hash_hashes(&[data_hash, zero_hash]);
500        assert_eq!(hash_of_single_hash, hash_of_single_hash_and_zero_hash);
501    }
502
    #[test_case(vec![0u8; BLOCK_SIZE + 256], BLOCK_SIZE ; "test_exact_size")]
    #[test_case(vec![0u8; 256], 0 ; "test_exact_size_from_zero")]
    #[test_case(vec![0u8; BLOCK_SIZE * 2], BLOCK_SIZE ; "test_block_aligned")]
    #[test_case(vec![0u8; BLOCK_SIZE], 0 ; "test_block_aligned_from_zero")]
    #[test_case(vec![0u8; BLOCK_SIZE + 300], BLOCK_SIZE ; "test_trailing_space")]
    #[test_case(vec![0u8; 300], 0 ; "test_trailing_space_from_zero")]
    // Writes a raw descriptor at `descriptor_offset` (the start of the last block of `buf`,
    // for various buffer shapes) and verifies that `FsVerityDescriptor::from_bytes` locates
    // it there and round-trips every field.
    fn descriptor_read_write_locations(mut buf: Vec<u8>, descriptor_offset: usize) {
        let salt = [4u8; 6];
        let root = [65u8; 32];
        let descriptor = FsVerityDescriptorRaw::new(
            fio::HashAlgorithm::Sha256,
            BLOCK_SIZE as u64,
            8192,
            root.as_slice(),
            salt.as_slice(),
        )
        .expect("Create raw descriptor");

        descriptor
            .write_to_slice(&mut buf.as_mut_slice()[descriptor_offset..])
            .expect("Writing to buf.");

        let descriptor2 = FsVerityDescriptor::from_bytes(buf.as_slice(), BLOCK_SIZE)
            .expect("Parsing descriptor back");
        // Verify the raw values.
        assert_eq!(descriptor2.inner.version, descriptor.version);
        assert_eq!(descriptor2.inner.algorithm, descriptor.algorithm);
        assert_eq!(descriptor2.inner.block_size_log2, descriptor.block_size_log2);
        assert_eq!(descriptor2.inner.salt_size, descriptor.salt_size);
        assert_eq!(descriptor2.inner.file_size, descriptor.file_size);
        assert_eq!(descriptor2.inner.root_digest, descriptor.root_digest);
        assert_eq!(descriptor2.inner.salt, descriptor.salt);

        // Verify the processed values.
        assert_eq!(descriptor2.file_size(), 8192);
        assert_eq!(descriptor2.digest_len(), 32);
        assert_eq!(descriptor2.digest_algorithm(), fio::HashAlgorithm::Sha256);
        // Accessors truncate the fixed-size fields back to their meaningful prefixes.
        assert_eq!(descriptor2.root_digest(), root.as_slice());
        assert_eq!(descriptor2.salt(), salt.as_slice());
    }
543
544    #[test_case(2, vec![0u8; BLOCK_SIZE * 2], BLOCK_SIZE, 0, FsVerityHasher::Sha256(FsVerityHasherOptions::new(vec![0xAB; 8], BLOCK_SIZE)); "sha256")]
545    #[test_case(2, vec![0u8; BLOCK_SIZE * 2], BLOCK_SIZE, 0, FsVerityHasher::Sha512(FsVerityHasherOptions::new(vec![0xAB; 8], BLOCK_SIZE)); "sha512")]
546    // Enough blocks to have a second layer of merkle tree.
547    #[test_case(129, vec![0u8; BLOCK_SIZE * 3], BLOCK_SIZE * 2, 0, FsVerityHasher::Sha256(FsVerityHasherOptions::new(vec![0xAB; 8], BLOCK_SIZE)); "sha256_big_file")]
548    #[test_case(129, vec![0u8; BLOCK_SIZE * 4], BLOCK_SIZE * 3, 0, FsVerityHasher::Sha512(FsVerityHasherOptions::new(vec![0xAB; 8], BLOCK_SIZE)); "sha512_big_file")]
549    // Don't block align the end, just enough space for the descriptor.
550    #[test_case(2, vec![0u8; BLOCK_SIZE + 256], BLOCK_SIZE, 0, FsVerityHasher::Sha256(FsVerityHasherOptions::new(vec![0xAB; 8], BLOCK_SIZE)); "sha256_exact_fit")]
551    #[test_case(2, vec![0u8; BLOCK_SIZE + 256], BLOCK_SIZE, 0, FsVerityHasher::Sha512(FsVerityHasherOptions::new(vec![0xAB; 8], BLOCK_SIZE)); "sha512_exact_fit")]
552    // A really big merkle buffer, everything should still be at the end of it.
553    #[test_case(2, vec![0u8; BLOCK_SIZE * 100], BLOCK_SIZE * 99, BLOCK_SIZE * 98, FsVerityHasher::Sha256(FsVerityHasherOptions::new(vec![0xAB; 8], BLOCK_SIZE)); "sha256_big_buf")]
554    #[test_case(2, vec![0u8; BLOCK_SIZE * 100], BLOCK_SIZE * 99, BLOCK_SIZE * 98, FsVerityHasher::Sha512(FsVerityHasherOptions::new(vec![0xAB; 8], BLOCK_SIZE)); "sha512_big_buf")]
555    // File has only a single block. This is a special case for generating the leaf data.
556    #[test_case(1, vec![0u8; BLOCK_SIZE * 2], BLOCK_SIZE, 0, FsVerityHasher::Sha256(FsVerityHasherOptions::new(vec![0xAB; 8], BLOCK_SIZE)); "sha256_one_block")]
557    #[test_case(1, vec![0u8; BLOCK_SIZE * 2], BLOCK_SIZE, 0, FsVerityHasher::Sha512(FsVerityHasherOptions::new(vec![0xAB; 8], BLOCK_SIZE)); "sha512_one_block")]
558    // File has no data blocks. This is a special case for generating the leaf data.
559    #[test_case(0, vec![0u8; BLOCK_SIZE * 2], BLOCK_SIZE, 0, FsVerityHasher::Sha256(FsVerityHasherOptions::new(vec![0xAB; 8], BLOCK_SIZE)); "sha256_empty_file")]
560    #[test_case(0, vec![0u8; BLOCK_SIZE * 2], BLOCK_SIZE, 0, FsVerityHasher::Sha512(FsVerityHasherOptions::new(vec![0xAB; 8], BLOCK_SIZE)); "sha512_empty_file")]
561    fn descriptor_merkle_leaves_locations(
562        file_blocks: usize,
563        mut buf: Vec<u8>,
564        descriptor_offset: usize,
565        leaf_offset: usize,
566        hasher: FsVerityHasher,
567    ) {
568        let mut file = vec![0u8; BLOCK_SIZE * file_blocks];
569        for i in 0..file_blocks {
570            let offset = i * BLOCK_SIZE;
571            file.as_mut_slice()[offset..(offset + BLOCK_SIZE)].fill(i as u8);
572        }
573
574        let (algorithm, salt) = match &hasher {
575            FsVerityHasher::Sha256(options) => (fio::HashAlgorithm::Sha256, options.salt.clone()),
576            FsVerityHasher::Sha512(options) => (fio::HashAlgorithm::Sha512, options.salt.clone()),
577        };
578
579        let hash_size = hasher.hash_size();
580        let mut builder = MerkleTreeBuilder::new(hasher);
581        builder.write(file.as_slice());
582        let tree = builder.finish();
583
584        let descriptor = FsVerityDescriptorRaw::new(
585            algorithm,
586            BLOCK_SIZE as u64,
587            file.len() as u64,
588            tree.root(),
589            salt.as_slice(),
590        )
591        .expect("Creating raw descriptor");
592
593        descriptor
594            .write_to_slice(&mut buf.as_mut_slice()[descriptor_offset..])
595            .expect("Writing descriptor");
596        // FsVerity doesn't actually write out the leaves if there is one or fewer blocks.
597        if file_blocks > 1 {
598            let leaf_bytes: Vec<u8> = tree.as_ref()[0].iter().flatten().copied().collect();
599            buf.as_mut_slice()[leaf_offset..(leaf_offset + (file_blocks * hash_size))]
600                .copy_from_slice(leaf_bytes.as_slice());
601        }
602
603        let descriptor2 =
604            FsVerityDescriptor::from_bytes(buf.as_slice(), BLOCK_SIZE).expect("Parsing decsriptor");
605        assert_eq!(descriptor2.root_digest(), tree.root());
606
607        let mut verifier_builder = MerkleTreeBuilder::new(descriptor2.hasher());
608        let leaves = descriptor2.leaf_digests().expect("Finding leaf digests");
609        for leaf in leaves.chunks_exact(hash_size) {
610            verifier_builder.push_data_hash(leaf.to_vec());
611        }
612
613        let verifier_tree = verifier_builder.finish();
614        assert_eq!(verifier_tree.root(), tree.root());
615    }
616
    #[test]
    // Exercises each validation branch of `FsVerityDescriptorRaw::new` and the buffer check in
    // `write_to_slice`.
    fn test_raw_descriptor_failure_cases() {
        // The base case is valid.
        let descriptor = FsVerityDescriptorRaw::new(
            fio::HashAlgorithm::Sha256,
            BLOCK_SIZE as u64,
            12,
            &[0u8; 32],
            &[0u8; 32],
        )
        .expect("Creating valid descriptor");
        {
            // The packed descriptor struct is exactly 256 bytes, so this fits.
            let mut buf = vec![0u8; 256];
            descriptor.write_to_slice(buf.as_mut_slice()).expect("Writing out descriptor");
        }

        // Try with buf too small.
        {
            let mut buf = vec![0u8; 200];
            descriptor.write_to_slice(buf.as_mut_slice()).expect_err("Buffer too small");
        }

        // Block is too small or not power of two.
        FsVerityDescriptorRaw::new(fio::HashAlgorithm::Sha256, 256, 12, &[0u8; 32], &[0u8; 32])
            .expect_err("Bad block size");
        FsVerityDescriptorRaw::new(
            fio::HashAlgorithm::Sha256,
            4097 as u64,
            12,
            &[0u8; 32],
            &[0u8; 32],
        )
        .expect_err("Bad block size");

        // Salt is too long (the limit is 32 bytes).
        FsVerityDescriptorRaw::new(
            fio::HashAlgorithm::Sha256,
            BLOCK_SIZE as u64,
            12,
            &[0u8; 32],
            &[0u8; 33],
        )
        .expect_err("Bad salt");

        // Hash length wrong at 33
        FsVerityDescriptorRaw::new(
            fio::HashAlgorithm::Sha256,
            BLOCK_SIZE as u64,
            12,
            &[0u8; 33],
            &[0u8; 32],
        )
        .expect_err("Bad hash length");
        FsVerityDescriptorRaw::new(
            fio::HashAlgorithm::Sha512,
            BLOCK_SIZE as u64,
            12,
            &[0u8; 33],
            &[0u8; 32],
        )
        .expect_err("Bad hash length");
    }
679
    #[test]
    // A 3,000,000-byte file needs 733 leaf digests (one block-aligned layer of 24576 bytes),
    // but the descriptor sits at offset BLOCK_SIZE, leaving only 4096 bytes before it, so
    // `leaf_digests` must fail.
    fn test_descriptor_buf_too_small_for_leaves() {
        let raw_descriptor = FsVerityDescriptorRaw {
            version: 1,
            algorithm: 1,
            block_size_log2: BLOCK_SIZE.trailing_zeros() as u8,
            salt_size: 8,
            _reserved_1: [0u8; 4],
            file_size: 3000000u64.to_le_bytes(),
            root_digest: [0u8; 64],
            salt: [0u8; 32],
            _reserved_2: [0u8; 144],
        };
        let mut buf = vec![0u8; BLOCK_SIZE * 2];
        raw_descriptor.write_to_slice(&mut buf[BLOCK_SIZE..]).expect("Writing out descriptor");
        // Parsing succeeds — only the descriptor fields are validated here.
        let descriptor =
            FsVerityDescriptor::from_bytes(buf.as_slice(), BLOCK_SIZE).expect("Parsing fine");
        descriptor.leaf_digests().expect_err("Not enough space for leaves");
    }
699
700    #[test]
701    fn test_descriptor_from_bytes_validation() {
702        // Base case, success.
703        {
704            let descriptor = FsVerityDescriptorRaw {
705                version: 1,
706                algorithm: 1,
707                block_size_log2: BLOCK_SIZE.trailing_zeros() as u8,
708                salt_size: 8,
709                _reserved_1: [0u8; 4],
710                file_size: 25u64.to_le_bytes(),
711                root_digest: [0u8; 64],
712                salt: [0u8; 32],
713                _reserved_2: [0u8; 144],
714            };
715            let mut buf = vec![0u8; 256];
716            descriptor.write_to_slice(buf.as_mut_slice()).expect("Writing out descriptor");
717            FsVerityDescriptor::from_bytes(buf.as_slice(), BLOCK_SIZE).expect("Parsing fine");
718        }
719
720        // Buffer too small to parse.
721        {
722            let buf = vec![0u8; 200];
723            FsVerityDescriptor::from_bytes(buf.as_slice(), BLOCK_SIZE).expect_err("Buff too small");
724        }
725
726        // Bad block sizes provided to method
727        {
728            let descriptor = FsVerityDescriptorRaw {
729                version: 1,
730                algorithm: 1,
731                block_size_log2: BLOCK_SIZE.trailing_zeros() as u8,
732                salt_size: 8,
733                _reserved_1: [0u8; 4],
734                file_size: 25u64.to_le_bytes(),
735                root_digest: [0u8; 64],
736                salt: [0u8; 32],
737                _reserved_2: [0u8; 144],
738            };
739            let mut buf = vec![0u8; 256];
740            descriptor.write_to_slice(buf.as_mut_slice()).expect("Writing out descriptor");
741            FsVerityDescriptor::from_bytes(buf.as_slice(), 4097)
742                .expect_err("Bad provided block size");
743        }
744        {
745            let descriptor = FsVerityDescriptorRaw {
746                version: 1,
747                algorithm: 1,
748                block_size_log2: BLOCK_SIZE.trailing_zeros() as u8,
749                salt_size: 8,
750                _reserved_1: [0u8; 4],
751                file_size: 25u64.to_le_bytes(),
752                root_digest: [0u8; 64],
753                salt: [0u8; 32],
754                _reserved_2: [0u8; 144],
755            };
756            let mut buf = vec![0u8; 256];
757            descriptor.write_to_slice(buf.as_mut_slice()).expect("Writing out descriptor");
758            FsVerityDescriptor::from_bytes(buf.as_slice(), 0).expect_err("Bad provided block size");
759        }
760
761        // Bad version
762        {
763            let descriptor = FsVerityDescriptorRaw {
764                version: 2,
765                algorithm: 1,
766                block_size_log2: BLOCK_SIZE.trailing_zeros() as u8,
767                salt_size: 8,
768                _reserved_1: [0u8; 4],
769                file_size: 25u64.to_le_bytes(),
770                root_digest: [0u8; 64],
771                salt: [0u8; 32],
772                _reserved_2: [0u8; 144],
773            };
774            let mut buf = vec![0u8; 256];
775            descriptor.write_to_slice(buf.as_mut_slice()).expect("Writing out descriptor");
776            FsVerityDescriptor::from_bytes(buf.as_slice(), BLOCK_SIZE).expect_err("Bad version");
777        }
778
779        // Bad algorithm type.
780        {
781            let descriptor = FsVerityDescriptorRaw {
782                version: 1,
783                algorithm: 3,
784                block_size_log2: BLOCK_SIZE.trailing_zeros() as u8,
785                salt_size: 8,
786                _reserved_1: [0u8; 4],
787                file_size: 25u64.to_le_bytes(),
788                root_digest: [0u8; 64],
789                salt: [0u8; 32],
790                _reserved_2: [0u8; 144],
791            };
792            let mut buf = vec![0u8; 256];
793            descriptor.write_to_slice(buf.as_mut_slice()).expect("Writing out descriptor");
794            FsVerityDescriptor::from_bytes(buf.as_slice(), BLOCK_SIZE).expect_err("Bad algorithm");
795        }
796
797        // Bad block size. Too small.
798        {
799            let descriptor = FsVerityDescriptorRaw {
800                version: 1,
801                algorithm: 1,
802                block_size_log2: 9,
803                salt_size: 8,
804                _reserved_1: [0u8; 4],
805                file_size: 25u64.to_le_bytes(),
806                root_digest: [0u8; 64],
807                salt: [0u8; 32],
808                _reserved_2: [0u8; 144],
809            };
810            let mut buf = vec![0u8; 256];
811            descriptor.write_to_slice(buf.as_mut_slice()).expect("Writing out descriptor");
812            FsVerityDescriptor::from_bytes(buf.as_slice(), BLOCK_SIZE).expect_err("Bad block size");
813        }
814
815        // Bad block size. Too big.
816        {
817            let descriptor = FsVerityDescriptorRaw {
818                version: 1,
819                algorithm: 1,
820                block_size_log2: 128,
821                salt_size: 8,
822                _reserved_1: [0u8; 4],
823                file_size: 25u64.to_le_bytes(),
824                root_digest: [0u8; 64],
825                salt: [0u8; 32],
826                _reserved_2: [0u8; 144],
827            };
828            let mut buf = vec![0u8; 256];
829            descriptor.write_to_slice(buf.as_mut_slice()).expect("Writing out descriptor");
830            FsVerityDescriptor::from_bytes(buf.as_slice(), BLOCK_SIZE).expect_err("Bad block size");
831        }
832
833        // Salt size too big.
834        {
835            let descriptor = FsVerityDescriptorRaw {
836                version: 1,
837                algorithm: 1,
838                block_size_log2: BLOCK_SIZE.trailing_zeros() as u8,
839                salt_size: 40,
840                _reserved_1: [0u8; 4],
841                file_size: 25u64.to_le_bytes(),
842                root_digest: [0u8; 64],
843                salt: [0u8; 32],
844                _reserved_2: [0u8; 144],
845            };
846            let mut buf = vec![0u8; 256];
847            descriptor.write_to_slice(buf.as_mut_slice()).expect("Writing out descriptor");
848            FsVerityDescriptor::from_bytes(buf.as_slice(), BLOCK_SIZE).expect_err("Bad salt size");
849        }
850
851        // Block size doesn't match.
852        {
853            let descriptor = FsVerityDescriptorRaw {
854                version: 1,
855                algorithm: 1,
856                block_size_log2: 2048usize.trailing_zeros() as u8,
857                salt_size: 8,
858                _reserved_1: [0u8; 4],
859                file_size: 25u64.to_le_bytes(),
860                root_digest: [0u8; 64],
861                salt: [0u8; 32],
862                _reserved_2: [0u8; 144],
863            };
864            let mut buf = vec![0u8; 256];
865            descriptor.write_to_slice(buf.as_mut_slice()).expect("Writing out descriptor");
866            FsVerityDescriptor::from_bytes(buf.as_slice(), BLOCK_SIZE)
867                .expect_err("Block size mismatch");
868        }
869    }
870}