sparse/
lib.rs

1// Copyright 2022 The Fuchsia Authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
5#[cfg(target_endian = "big")]
6compile_error!("This library assumes little-endian!");
7
8pub mod builder;
9mod format;
10pub mod reader;
11
12use crate::format::{ChunkHeader, SparseHeader};
13use anyhow::{bail, ensure, Context, Result};
14use core::fmt;
15use serde::de::DeserializeOwned;
16use std::fs::File;
17use std::io::{Cursor, Read, Seek, SeekFrom, Write};
18use std::path::Path;
19use tempfile::{NamedTempFile, TempPath};
20
21// Size of blocks to write.  Note that the format supports varied block sizes; this is the size
22// preferred by this library.
23const BLK_SIZE: u32 = 0x1000;
24
25fn deserialize_from<T: DeserializeOwned, R: Read + ?Sized>(source: &mut R) -> Result<T> {
26    let mut buf = vec![0u8; std::mem::size_of::<T>()];
27    source.read_exact(&mut buf[..]).context("Failed to read bytes")?;
28    Ok(bincode::deserialize(&buf[..])?)
29}
30
31/// A union trait for `Write` and `Seek` that also allows truncation.
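///
/// For example (sketch), using the `Cursor<Vec<u8>>` impl below to truncate an in-memory buffer:
///
/// ```ignore
/// let mut out = Cursor::new(vec![0u8; 8192]);
/// out.set_len(4096)?;
/// assert_eq!(out.get_ref().len(), 4096);
/// ```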
32pub trait Writer: Write + Seek {
33    /// Sets the length of the output stream.
34    fn set_len(&mut self, size: u64) -> Result<()>;
35}
36
37impl Writer for File {
38    fn set_len(&mut self, size: u64) -> Result<()> {
39        Ok(File::set_len(self, size)?)
40    }
41}
42
43impl Writer for Cursor<Vec<u8>> {
44    fn set_len(&mut self, size: u64) -> Result<()> {
45        Vec::resize(self.get_mut(), size as usize, 0u8);
46        Ok(())
47    }
48}
49
50// A wrapper around a Reader, which makes it seem like the underlying stream is only self.1 bytes
51// long.  The underlying reader is still advanced upon reading.
52// This is distinct from `std::io::Take` in that it does not modify the seek offset of the
53// underlying reader.  In other words, `LimitedReader` can be used to read a window within the
54// reader (by setting seek offset to the start, and the size limit to the end).
55struct LimitedReader<'a, R>(pub &'a mut R, pub usize);
56
57impl<'a, R: Read + Seek> Read for LimitedReader<'a, R> {
58    fn read(&mut self, buf: &mut [u8]) -> std::io::Result<usize> {
59        let offset = self.0.stream_position()?;
60        let avail = self.1.saturating_sub(offset as usize);
61        let to_read = std::cmp::min(avail, buf.len());
62        self.0.read(&mut buf[..to_read])
63    }
64}
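
// As an illustrative sketch (not compiled here, since `LimitedReader` is private, and `reader` is
// assumed to be any `Read + Seek`): to read only bytes 32..48 of `reader`, seek to the window
// start and cap reads at the absolute end offset:
//
//     use std::io::{Read as _, Seek as _, SeekFrom};
//     reader.seek(SeekFrom::Start(32))?;
//     let mut window = LimitedReader(&mut reader, 48);
//     let mut buf = Vec::new();
//     window.read_to_end(&mut buf)?; // reads at most 16 bytes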
65
66/// Returns whether the image in `reader` appears to be in the sparse format.
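///
/// # Example
///
/// A minimal sketch (marked `ignore`; assumes this crate is available as `sparse`):
///
/// ```ignore
/// use std::fs::File;
///
/// let mut file = File::open("disk.img")?;
/// if sparse::is_sparse_image(&mut file) {
///     println!("disk.img is a sparse image");
/// }
/// ```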
67pub fn is_sparse_image<R: Read + Seek>(reader: &mut R) -> bool {
68    || -> Option<bool> {
69        let header: SparseHeader = deserialize_from(reader).ok()?;
70        let is_sparse = header.magic == format::SPARSE_HEADER_MAGIC;
71        reader.seek(SeekFrom::Start(0)).ok()?;
72        Some(is_sparse)
73    }()
74    .unwrap_or(false)
75}
76
77#[derive(Clone, PartialEq, Debug)]
78enum Chunk {
79    /// `Raw` represents a set of blocks to be written to disk as-is.
80    /// `start` is the offset in the expanded image at which the Raw section starts.
81    /// `start` and `size` are in bytes, but must be block-aligned.
82    Raw { start: u64, size: u32 },
83    /// `Fill` represents a Chunk that has the `value` repeated enough to fill `size` bytes.
84    /// `start` is the offset in the expanded image at which the Fill section starts.
85    /// `start` and `size` are in bytes, but must be block-aligned.
86    Fill { start: u64, size: u32, value: u32 },
87    /// `DontCare` represents a set of blocks that need to be "offset" by the
88    /// image recipient.  If an image needs to be broken up into two sparse images, and we flash n
89    /// bytes for Sparse Image 1, Sparse Image 2 needs to start with a DontCareChunk with
90    /// (n/blocksize) blocks as its "size" property.
91    /// `start` is the offset in the expanded image at which the DontCare section starts.
92    /// `start` and `size` are in bytes, but must be block-aligned.
93    DontCare { start: u64, size: u32 },
94    /// `Crc32` is used as a checksum over a given set of chunks in a sparse image.  This chunk is
95    /// optional and unused in most implementations of the Sparse Image format; the type is included
96    /// for completeness. It carries a 4-byte CRC32 checksum, represented as a u32.
97    #[allow(dead_code)]
98    Crc32 { checksum: u32 },
99}
100
101impl Chunk {
102    /// Attempts to read a `Chunk` from `reader`.  The reader will be positioned at the first byte
103    /// following the chunk header and any extra data; for a Raw chunk this means it will point at
104    /// the data payload, and for other chunks it will point at the next chunk header (or EOF).
105    /// `offset` is the current offset in the logical volume.
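    ///
    /// A sketch of walking every chunk in an image (these types are crate-private, so this is not
    /// a doctest; `reader` is assumed to be any `Read` positioned at the start of the image):
    ///
    /// ```ignore
    /// let header: SparseHeader = deserialize_from(&mut reader)?;
    /// let mut offset = 0u64;
    /// for _ in 0..header.total_chunks {
    ///     let chunk = Chunk::read_metadata(&mut reader, offset, header.blk_sz)?;
    ///     offset += chunk.output_size() as u64;
    ///     // For a Raw chunk the reader now points at the data payload, which must be
    ///     // skipped before reading the next chunk header.
    /// }
    /// ```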
106    pub fn read_metadata<R: Read>(reader: &mut R, offset: u64, block_size: u32) -> Result<Self> {
107        let header: ChunkHeader =
108            deserialize_from(reader).context("Failed to read chunk header")?;
109        ensure!(header.valid(), "Invalid chunk header");
110
111        let size = header
112            .chunk_sz
113            .checked_mul(block_size)
114            .context("Chunk size * block size must fit in a u32")?;
115        match header.chunk_type {
116            format::CHUNK_TYPE_RAW => Ok(Self::Raw { start: offset, size }),
117            format::CHUNK_TYPE_FILL => {
118                let value: u32 =
119                    deserialize_from(reader).context("Failed to deserialize fill value")?;
120                Ok(Self::Fill { start: offset, size, value })
121            }
122            format::CHUNK_TYPE_DONT_CARE => Ok(Self::DontCare { start: offset, size }),
123            format::CHUNK_TYPE_CRC32 => {
124                let checksum: u32 =
125                    deserialize_from(reader).context("Failed to deserialize checksum")?;
126                Ok(Self::Crc32 { checksum })
127            }
128            // We already validated the chunk_type in `ChunkHeader::valid`.
129            _ => unreachable!(),
130        }
131    }
132
133    fn valid(&self, block_size: u32) -> bool {
134        self.output_size() % block_size == 0
135    }
136
137    /// Returns the offset into the logical image the chunk refers to, or None if the chunk has no
138    /// output data.
139    fn output_offset(&self) -> Option<u64> {
140        match self {
141            Self::Raw { start, .. } => Some(*start),
142            Self::Fill { start, .. } => Some(*start),
143            Self::DontCare { start, .. } => Some(*start),
144            Self::Crc32 { .. } => None,
145        }
146    }
147
148    /// Return number of bytes the chunk expands to when written to the partition.
149    fn output_size(&self) -> u32 {
150        match self {
151            Self::Raw { size, .. } => *size,
152            Self::Fill { size, .. } => *size,
153            Self::DontCare { size, .. } => *size,
154            Self::Crc32 { .. } => 0,
155        }
156    }
157
158    /// Return number of blocks the chunk expands to when written to the partition.
159    fn output_blocks(&self, block_size: u32) -> u32 {
160        self.output_size().div_ceil(block_size)
161    }
162
163    /// `chunk_type` returns the integer type code for this chunk, as used in the
164    /// `ChunkHeader`.
165    fn chunk_type(&self) -> u16 {
166        match self {
167            Self::Raw { .. } => format::CHUNK_TYPE_RAW,
168            Self::Fill { .. } => format::CHUNK_TYPE_FILL,
169            Self::DontCare { .. } => format::CHUNK_TYPE_DONT_CARE,
170            Self::Crc32 { .. } => format::CHUNK_TYPE_CRC32,
171        }
172    }
173
174    /// `chunk_data_len` returns the length of the chunk's header plus the
175    /// length of the data when serialized
176    fn chunk_data_len(&self) -> u32 {
177        let header_size = format::CHUNK_HEADER_SIZE;
178        let data_size = match self {
179            Self::Raw { size, .. } => *size,
180            Self::Fill { .. } => std::mem::size_of::<u32>() as u32,
181            Self::DontCare { .. } => 0,
182            Self::Crc32 { .. } => std::mem::size_of::<u32>() as u32,
183        };
184        header_size.checked_add(data_size).unwrap()
185    }
186
187    /// Writes the chunk to the given Writer.  `source` is a Reader containing the data payload for
188    /// a Raw type chunk, with the seek offset pointing to the first byte of the data payload, and
189    /// with exactly enough bytes available for the rest of the data payload.
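    ///
    /// For example (sketch, assuming `source` is a `Read + Seek` positioned at the first byte of
    /// the payload and `dest` is any `Write`):
    ///
    /// ```ignore
    /// let chunk = Chunk::Raw { start: 0, size: BLK_SIZE };
    /// chunk.write(Some(&mut source), &mut dest, BLK_SIZE)?;
    /// ```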
190    fn write<W: Write, R: Read>(
191        &self,
192        source: Option<&mut R>,
193        dest: &mut W,
194        block_size: u32,
195    ) -> Result<()> {
196        ensure!(self.valid(block_size), "Not writing invalid chunk");
197        let header = ChunkHeader::new(
198            self.chunk_type(),
199            0x0,
200            self.output_blocks(block_size),
201            self.chunk_data_len(),
202        );
203
204        bincode::serialize_into(&mut *dest, &header)?;
205
206        match self {
207            Self::Raw { size, .. } => {
208                ensure!(source.is_some(), "No source for Raw chunk");
209                let n = std::io::copy(source.unwrap(), dest)?;
210                let size = *size as u64;
211                if n < size {
212                    let zeroes = vec![0u8; (size - n) as usize];
213                    dest.write_all(&zeroes)?;
214                }
215            }
216            Self::Fill { value, .. } => {
217                // Serialize the fill value.
218                bincode::serialize_into(dest, value)?;
219            }
220            Self::DontCare { .. } => {
221                // DontCare has no data to write
222            }
223            Self::Crc32 { checksum } => {
224                bincode::serialize_into(dest, checksum)?;
225            }
226        }
227        Ok(())
228    }
229}
230
231impl fmt::Display for Chunk {
232    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
233        let message = match self {
234            Self::Raw { start, size } => {
235                format!("RawChunk: start: {}, total bytes: {}", start, size)
236            }
237            Self::Fill { start, size, value } => {
238                format!("FillChunk: start: {}, value: {}, n_blocks: {}", start, value, size)
239            }
240            Self::DontCare { start, size } => {
241                format!("DontCareChunk: start: {}, bytes: {}", start, size)
242            }
243            Self::Crc32 { checksum } => format!("Crc32Chunk: checksum: {:?}", checksum),
244        };
245        write!(f, "{}", message)
246    }
247}
248
249/// Chunk::write takes an Option of something that implements Read. The compiler still requires a
250/// concrete type for the generic argument even when the Option is None. This constant can be used
251/// in place of None to avoid having to specify a type for the source.
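///
/// For example (sketch, assuming `dest` is any `Write`), writing a `Fill` chunk, which needs no
/// source reader:
///
/// ```ignore
/// let fill = Chunk::Fill { start: 0, size: BLK_SIZE, value: 0xabcd_abcd };
/// fill.write(NO_SOURCE, &mut dest, BLK_SIZE)?;
/// ```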
252pub const NO_SOURCE: Option<&mut Cursor<&[u8]>> = None;
253
254#[derive(Clone, Debug, PartialEq)]
255struct SparseFileWriter {
256    chunks: Vec<Chunk>,
257}
258
259impl SparseFileWriter {
260    fn new(chunks: Vec<Chunk>) -> SparseFileWriter {
261        SparseFileWriter { chunks }
262    }
263
264    fn total_blocks(&self) -> u32 {
265        self.chunks.iter().map(|c| c.output_blocks(BLK_SIZE)).sum()
266    }
267
268    fn total_bytes(&self) -> u64 {
269        self.chunks.iter().map(|c| c.output_size() as u64).sum()
270    }
271
272    fn write<W: Write + Seek, R: Read + Seek>(&self, reader: &mut R, writer: &mut W) -> Result<()> {
273        let header = SparseHeader::new(
274            BLK_SIZE.try_into().unwrap(),          // Size of the blocks
275            self.total_blocks(),                   // Total blocks in this image
276            self.chunks.len().try_into().unwrap(), // Total chunks in this image
277        );
278
279        bincode::serialize_into(&mut *writer, &header)?;
280
281        for chunk in &self.chunks {
282            let mut reader = if let &Chunk::Raw { start, size } = chunk {
283                reader.seek(SeekFrom::Start(start))?;
284                Some(LimitedReader(reader, start as usize + size as usize))
285            } else {
286                None
287            };
288            chunk.write(reader.as_mut(), writer, BLK_SIZE)?;
289        }
290
291        Ok(())
292    }
293}
294
295impl fmt::Display for SparseFileWriter {
296    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
297        write!(f, r"SparseFileWriter: {} Chunks:", self.chunks.len())
298    }
299}
300
301/// `add_sparse_chunk` takes the input vec `r` and the given `Chunk`, `chunk`, and
302/// attempts to add the chunk to the end of the vec. If the current last chunk
303/// is the same kind of Chunk as `chunk` and their sizes can be added without
304/// overflowing, the two chunks are merged into one.
305///
306/// Example: if the last chunk in `r` is a `Fill` with value 0 and size 4096, and
307/// `chunk` is a `Fill` with value 0 and size 4096, then after this call the last
308/// element of `r` will be a `Fill` with value 0 and size 8192.
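///
/// A sketch of that merge (`Chunk` and this function are crate-private, so this is not a
/// doctest):
///
/// ```ignore
/// let mut chunks = vec![Chunk::Fill { start: 0, size: 4096, value: 0 }];
/// add_sparse_chunk(&mut chunks, Chunk::Fill { start: 4096, size: 4096, value: 0 })?;
/// assert_eq!(chunks, [Chunk::Fill { start: 0, size: 8192, value: 0 }]);
/// ```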
309fn add_sparse_chunk(r: &mut Vec<Chunk>, chunk: Chunk) -> Result<()> {
310    match r.last_mut() {
311        // We've got something in the Vec... if they are both the same type,
312        // merge them, otherwise, just push the new one
313        Some(last) => match (&last, &chunk) {
314            (Chunk::Raw { start, size }, Chunk::Raw { size: new_length, .. })
315                if size.checked_add(*new_length).is_some() =>
316            {
317                *last = Chunk::Raw { start: *start, size: size + new_length };
318                return Ok(());
319            }
320            (
321                Chunk::Fill { start, size, value },
322                Chunk::Fill { size: new_size, value: new_value, .. },
323            ) if value == new_value && size.checked_add(*new_size).is_some() => {
324                *last = Chunk::Fill { start: *start, size: size + new_size, value: *value };
325                return Ok(());
326            }
327            (Chunk::DontCare { start, size }, Chunk::DontCare { size: new_size, .. })
328                if size.checked_add(*new_size).is_some() =>
329            {
330                *last = Chunk::DontCare { start: *start, size: size + new_size };
331                return Ok(());
332            }
333            _ => {}
334        },
335        None => {}
336    }
337
338    // If the chunk types differ they cannot be merged.
339    // If they are both Fill but have different values, they cannot be merged.
340    // Crc32 cannot be merged.
341    // If we don't have any chunks then we add it
342    r.push(chunk);
343    Ok(())
344}
345
346/// Reads a sparse image from `source` and expands it to its unsparsed representation in `dest`.
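///
/// # Example
///
/// A minimal sketch (marked `ignore`; assumes this crate is available as `sparse`):
///
/// ```ignore
/// use std::fs::File;
///
/// let mut source = File::open("image.sparse")?;
/// let mut dest = File::create("image.raw")?;
/// sparse::unsparse(&mut source, &mut dest)?;
/// ```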
347pub fn unsparse<W: Writer, R: Read + Seek>(source: &mut R, dest: &mut W) -> Result<()> {
348    let header: SparseHeader = deserialize_from(source).context("Failed to read header")?;
349    ensure!(header.valid(), "Invalid sparse image header {:?}", header);
350
351    for _ in 0..header.total_chunks {
352        expand_chunk(source, dest, header.blk_sz).context("Failed to expand chunk")?;
353    }
354    // Truncate output to its current seek offset, in case the last chunk we wrote was DontCare.
355    let offset = dest.stream_position()?;
356    dest.set_len(offset).context("Failed to truncate output")?;
357    dest.flush()?;
358    Ok(())
359}
360
361/// Reads a chunk from `source`, and expands it, writing the result to `dest`.
362fn expand_chunk<R: Read + Seek, W: Write + Seek>(
363    source: &mut R,
364    dest: &mut W,
365    block_size: u32,
366) -> Result<()> {
367    let header: ChunkHeader =
368        deserialize_from(source).context("Failed to deserialize chunk header")?;
369    ensure!(header.valid(), "Invalid chunk header {:x?}", header);
370    let size = (header.chunk_sz * block_size) as usize;
371    match header.chunk_type {
372        format::CHUNK_TYPE_RAW => {
373            let limit = source.stream_position()? as usize + size;
374            std::io::copy(&mut LimitedReader(source, limit), dest)
375                .context("Failed to copy contents")?;
376        }
377        format::CHUNK_TYPE_FILL => {
378            let value: [u8; 4] =
379                deserialize_from(source).context("Failed to deserialize fill value")?;
380            assert!(size % 4 == 0);
381            let repeated = value.repeat(size / 4);
382            dest.write_all(&repeated).context("Failed to fill contents")?;
383        }
384        format::CHUNK_TYPE_DONT_CARE => {
385            dest.seek(SeekFrom::Current(size as i64)).context("Failed to skip contents")?;
386        }
387        format::CHUNK_TYPE_CRC32 => {
388            let _: u32 = deserialize_from(source).context("Failed to deserialize checksum")?;
389        }
390        _ => bail!("Invalid type {}", header.chunk_type),
391    };
392    Ok(())
393}
394
395/// `resparse` takes a SparseFileWriter and a maximum size and will
396/// break the single SparseFileWriter into multiple SparseFileWriters whose
397/// sizes will not exceed `max_download_size`.
398///
399/// This will return an error if max_download_size is <= BLK_SIZE
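///
/// A sketch of the splitting behavior (these types are crate-private, so this is not a doctest);
/// it mirrors the `test_resparse_splits` test below:
///
/// ```ignore
/// let writer = SparseFileWriter::new(vec![
///     Chunk::Raw { start: 0, size: 4096 },
///     Chunk::Fill { start: 4096, size: 4096, value: 2 },
///     Chunk::Raw { start: 8192, size: 4096 },
/// ]);
/// // With an 8192-byte limit, the image is split into two sparse files; the second starts
/// // with a DontCare chunk covering the 8192 bytes of output already written.
/// let parts = resparse(writer, 8192)?;
/// assert_eq!(parts.len(), 2);
/// ```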
400fn resparse(
401    sparse_file: SparseFileWriter,
402    max_download_size: u64,
403) -> Result<Vec<SparseFileWriter>> {
404    if max_download_size <= BLK_SIZE as u64 {
405        anyhow::bail!(
406            "Given maximum download size ({}) is less than the block size ({})",
407            max_download_size,
408            BLK_SIZE
409        );
410    }
411    let mut ret = Vec::<SparseFileWriter>::new();
412
413    // Each file's length already accounts for the sparse file header as
414    // well as the size of a potential DontCare and Crc32 chunk.
415    let sunk_file_length = format::SPARSE_HEADER_SIZE as u64
416        + Chunk::DontCare { start: 0, size: BLK_SIZE }.chunk_data_len() as u64
417        + Chunk::Crc32 { checksum: 2345 }.chunk_data_len() as u64;
418
419    let mut chunk_pos = 0;
420    let mut output_offset = 0;
421    while chunk_pos < sparse_file.chunks.len() {
422        log::trace!("Starting a new file at chunk position: {}", chunk_pos);
423
424        let mut file_len = 0;
425        file_len += sunk_file_length;
426
427        let mut chunks = Vec::<Chunk>::new();
428        if chunk_pos > 0 {
429            // If we already have some chunks... add a DontCare block to
430            // move the pointer
431            log::trace!("Adding a DontCare chunk offset: {}", chunk_pos);
432            let dont_care = Chunk::DontCare { start: 0, size: output_offset.try_into().unwrap() };
433            chunks.push(dont_care);
434        }
435
436        loop {
437            match sparse_file.chunks.get(chunk_pos) {
438                Some(chunk) => {
439                    let curr_chunk_data_len = chunk.chunk_data_len() as u64;
440                    if (file_len + curr_chunk_data_len) > max_download_size {
441                        log::trace!(
442                            "Current file size is: {} and adding another chunk of len: {} would \
443                             put us over our max: {}",
444                            file_len,
445                            curr_chunk_data_len,
446                            max_download_size
447                        );
448
449                        // Add a don't care chunk to cover everything to the end of the image. While
450                        // this is not strictly speaking needed, other tools (simg2simg) produce
451                        // this chunk, and the Sparse image inspection tool simg_dump will produce a
452                        // warning if a sparse file does not have the same number of output blocks
453                        // as declared in the header.
454                        let remainder_size = sparse_file.total_bytes() - output_offset;
455                        let dont_care = Chunk::DontCare {
456                            start: output_offset,
457                            size: remainder_size.try_into().unwrap(),
458                        };
459                        chunks.push(dont_care);
460                        break;
461                    }
462                    log::trace!(
463                        "chunk: {} curr_chunk_data_len: {} current file size: {} \
464                         max_download_size: {} diff: {}",
465                        chunk_pos,
466                        curr_chunk_data_len,
467                        file_len,
468                        max_download_size,
469                        (max_download_size - file_len - curr_chunk_data_len)
470                    );
471                    add_sparse_chunk(&mut chunks, chunk.clone())?;
472                    file_len += curr_chunk_data_len;
473                    chunk_pos += 1;
474                    output_offset += chunk.output_size() as u64;
475                }
476                None => {
477                    log::trace!("Finished iterating chunks");
478                    break;
479                }
480            }
481        }
482        let resparsed = SparseFileWriter::new(chunks);
483        log::trace!("resparse: Adding new SparseFile: {}", resparsed);
484        ret.push(resparsed);
485    }
486
487    Ok(ret)
488}
489
490/// Takes the given `file_to_upload` for the partition `name` and creates a
491/// set of temporary files in the given `dir` in Sparse Image Format, with the
492/// provided `max_download_size` constraining the size of each file.
493///
494/// # Arguments
495///
496/// * `name` - Name of the partition for the image. Used for logs only.
497/// * `file_to_upload` - Path to the file to translate to sparse image format.
498/// * `dir` - Path to write the Sparse file(s).
499/// * `max_download_size` - Maximum size that can be downloaded by the device.
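///
/// # Example
///
/// A minimal sketch (marked `ignore`; assumes this crate is available as `sparse`, and the
/// partition name, paths, and 256 MiB limit are illustrative only):
///
/// ```ignore
/// use std::path::Path;
///
/// let sparse_files = sparse::build_sparse_files(
///     "userdata",
///     "/tmp/userdata.img",
///     Path::new("/tmp"),
///     256 * 1024 * 1024,
/// )?;
/// for path in &sparse_files {
///     println!("wrote {}", path.display());
/// }
/// ```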
500pub fn build_sparse_files(
501    name: &str,
502    file_to_upload: &str,
503    dir: &Path,
504    max_download_size: u64,
505) -> Result<Vec<TempPath>> {
506    if max_download_size <= BLK_SIZE as u64 {
507        anyhow::bail!(
508            "Given maximum download size ({}) is less than the block size ({})",
509            max_download_size,
510            BLK_SIZE
511        );
512    }
513    log::debug!("Building sparse files for: {}. File: {}", name, file_to_upload);
514    let mut in_file = File::open(file_to_upload)?;
515
516    let mut total_read: usize = 0;
517    // Preallocate vector to avoid reallocations as it grows.
518    let mut chunks =
519        Vec::<Chunk>::with_capacity((in_file.metadata()?.len() as usize / BLK_SIZE as usize) + 1);
520    let mut buf = [0u8; BLK_SIZE as usize];
521    loop {
522        let read = in_file.read(&mut buf)?;
523        if read == 0 {
524            break;
525        }
526
527        let is_fill = buf.chunks(4).collect::<Vec<&[u8]>>().windows(2).all(|w| w[0] == w[1]);
528        if is_fill {
529            // The Android Sparse Image Format specifies that a fill block
530            // is a four-byte u32 repeated to fill BLK_SIZE. Here we use
531            // bincode::deserialize to get the repeated four byte pattern from
532            // the buffer so that it can be serialized later when we write
533            // the sparse file with bincode::serialize.
534            let value: u32 = bincode::deserialize(&buf[0..4])?;
535            // Add a fill chunk
536            let fill = Chunk::Fill {
537                start: total_read as u64,
538                size: buf.len().try_into().unwrap(),
539                value,
540            };
541            log::trace!("Sparsing file: {}. Created: {}", file_to_upload, fill);
542            chunks.push(fill);
543        } else {
544            // Add a raw chunk
545            let raw = Chunk::Raw { start: total_read as u64, size: buf.len().try_into().unwrap() };
546            log::trace!("Sparsing file: {}. Created: {}", file_to_upload, raw);
547            chunks.push(raw);
548        }
549        total_read += read;
550    }
551
552    log::trace!("Creating sparse file from: {} chunks", chunks.len());
553
554    // At this point we are making a new sparse file from an unoptimized set of
555    // Chunks. This primarily means that adjacent Fill chunks of same value are
556    // not collapsed into a single Fill chunk (with a larger size). The advantage
557    // to this two pass approach is that (with some future work), we can create
558    // the "unoptimized" sparse file from a given image, and then "resparse" it
559    // as many times as desired with different `max_download_size` parameters.
560    // This would simplify the scenario where we want to flash the same image
561    // to multiple physical devices which may have slight differences in their
562    // hardware (and therefore different values of `max_download_size`).
563    let sparse_file = SparseFileWriter::new(chunks);
564    log::trace!("Created sparse file: {}", sparse_file);
565
566    let mut ret = Vec::<TempPath>::new();
567    log::trace!("Resparsing sparse file");
568    for re_sparsed_file in resparse(sparse_file, max_download_size)? {
569        let (file, temp_path) = NamedTempFile::new_in(dir)?.into_parts();
570        let mut file_create = File::from(file);
571
572        log::trace!("Writing resparsed {} to disk", re_sparsed_file);
573        re_sparsed_file.write(&mut in_file, &mut file_create)?;
574
575        ret.push(temp_path);
576    }
577
578    log::debug!("Finished building sparse files");
579
580    Ok(ret)
581}
582
583////////////////////////////////////////////////////////////////////////////////
584// tests
585
586#[cfg(test)]
587mod test {
588    #[cfg(target_os = "linux")]
589    use crate::build_sparse_files;
590
591    use super::builder::{DataSource, SparseImageBuilder};
592    use super::{
593        add_sparse_chunk, resparse, unsparse, Chunk, SparseFileWriter, BLK_SIZE, NO_SOURCE,
594    };
595    use rand::rngs::SmallRng;
596    use rand::{RngCore, SeedableRng};
597    use std::io::{Cursor, Read as _, Seek as _, SeekFrom, Write as _};
598    #[cfg(target_os = "linux")]
599    use std::path::Path;
600    #[cfg(target_os = "linux")]
601    use std::process::{Command, Stdio};
602    use tempfile::{NamedTempFile, TempDir};
603
604    #[test]
605    fn test_fill_into_bytes() {
606        let mut dest = Cursor::new(Vec::<u8>::new());
607
608        let fill_chunk = Chunk::Fill { start: 0, size: 5 * BLK_SIZE, value: 365 };
609        fill_chunk.write(NO_SOURCE, &mut dest, BLK_SIZE).unwrap();
610        assert_eq!(dest.into_inner(), [194, 202, 0, 0, 5, 0, 0, 0, 16, 0, 0, 0, 109, 1, 0, 0]);
611    }
612
613    #[test]
614    fn test_raw_into_bytes() {
615        const EXPECTED_RAW_BYTES: [u8; 22] =
616            [193, 202, 0, 0, 1, 0, 0, 0, 12, 16, 0, 0, 49, 50, 51, 52, 53, 0, 0, 0, 0, 0];
617
618        let mut source = Cursor::new(Vec::<u8>::from(&b"12345"[..]));
619        let mut sparse = Cursor::new(Vec::<u8>::new());
620        let chunk = Chunk::Raw { start: 0, size: BLK_SIZE };
621
622        chunk.write(Some(&mut source), &mut sparse, BLK_SIZE).unwrap();
623        let buf = sparse.into_inner();
624        assert_eq!(buf.len(), 4108);
625        assert_eq!(&buf[..EXPECTED_RAW_BYTES.len()], EXPECTED_RAW_BYTES);
626        assert_eq!(&buf[EXPECTED_RAW_BYTES.len()..], &[0u8; 4108 - EXPECTED_RAW_BYTES.len()]);
627    }
628
629    #[test]
630    fn test_dont_care_into_bytes() {
631        let mut dest = Cursor::new(Vec::<u8>::new());
632        let chunk = Chunk::DontCare { start: 0, size: 5 * BLK_SIZE };
633
634        chunk.write(NO_SOURCE, &mut dest, BLK_SIZE).unwrap();
635        assert_eq!(dest.into_inner(), [195, 202, 0, 0, 5, 0, 0, 0, 12, 0, 0, 0]);
636    }
637
638    #[test]
639    fn test_sparse_file_into_bytes() {
640        let mut source = Cursor::new(Vec::<u8>::from(&b"123"[..]));
641        let mut sparse = Cursor::new(Vec::<u8>::new());
642        let mut chunks = Vec::<Chunk>::new();
643        // Add a fill chunk
644        let fill = Chunk::Fill { start: 0, size: 4096, value: 5 };
645        chunks.push(fill);
646        // Add a raw chunk
647        let raw = Chunk::Raw { start: 0, size: 12288 };
648        chunks.push(raw);
649        // Add a dontcare chunk
650        let dontcare = Chunk::DontCare { start: 0, size: 4096 };
651        chunks.push(dontcare);
652
653        let sparsefile = SparseFileWriter::new(chunks);
654        sparsefile.write(&mut source, &mut sparse).unwrap();
655
656        sparse.seek(SeekFrom::Start(0)).unwrap();
657        let mut unsparsed = Cursor::new(Vec::<u8>::new());
658        unsparse(&mut sparse, &mut unsparsed).unwrap();
659        let buf = unsparsed.into_inner();
660        assert_eq!(buf.len(), 4096 + 12288 + 4096);
661        {
662            let chunks = buf[..4096].chunks(4);
663            for chunk in chunks {
664                assert_eq!(chunk, &[5u8, 0, 0, 0]);
665            }
666        }
667        assert_eq!(&buf[4096..4099], b"123");
668        assert_eq!(&buf[4099..16384], &[0u8; 12285]);
669        assert_eq!(&buf[16384..], &[0u8; 4096]);
670    }
671
672    ////////////////////////////////////////////////////////////////////////////
673    // Tests for resparse
674
675    #[test]
676    fn test_resparse_bails_on_too_small_size() {
677        let sparse = SparseFileWriter::new(Vec::<Chunk>::new());
678        assert!(resparse(sparse, 4095).is_err());
679    }
680
681    #[test]
682    fn test_resparse_splits() {
683        let max_download_size = 4096 * 2;
684
685        let mut chunks = Vec::<Chunk>::new();
686        chunks.push(Chunk::Raw { start: 0, size: 4096 });
687        chunks.push(Chunk::Fill { start: 4096, size: 4096, value: 2 });
688        // We want 2 sparse files with the second sparse file having a
689        // DontCare chunk and then this chunk
690        chunks.push(Chunk::Raw { start: 8192, size: 4096 });
691
692        let input_sparse_file = SparseFileWriter::new(chunks);
693        let resparsed_files = resparse(input_sparse_file, max_download_size).unwrap();
694        assert_eq!(2, resparsed_files.len());
695
696        assert_eq!(3, resparsed_files[0].chunks.len());
697        assert_eq!(Chunk::Raw { start: 0, size: 4096 }, resparsed_files[0].chunks[0]);
698        assert_eq!(Chunk::Fill { start: 4096, size: 4096, value: 2 }, resparsed_files[0].chunks[1]);
699        assert_eq!(Chunk::DontCare { start: 8192, size: 4096 }, resparsed_files[0].chunks[2]);
700
701        assert_eq!(2, resparsed_files[1].chunks.len());
702        assert_eq!(Chunk::DontCare { start: 0, size: 8192 }, resparsed_files[1].chunks[0]);
703        assert_eq!(Chunk::Raw { start: 8192, size: 4096 }, resparsed_files[1].chunks[1]);
704    }
705
706    ////////////////////////////////////////////////////////////////////////////
707    // Tests for add_sparse_chunk
708
709    #[test]
710    fn test_add_sparse_chunk_adds_empty() {
711        let init_vec = Vec::<Chunk>::new();
712        let mut res = init_vec.clone();
713        add_sparse_chunk(&mut res, Chunk::Fill { start: 0, size: 4096, value: 1 }).unwrap();
714        assert_eq!(0, init_vec.len());
715        assert_ne!(init_vec, res);
716        assert_eq!(Chunk::Fill { start: 0, size: 4096, value: 1 }, res[0]);
717    }
718
719    #[test]
720    fn test_add_sparse_chunk_fill() {
721        // Test they merge.
722        {
723            let mut init_vec = Vec::<Chunk>::new();
724            init_vec.push(Chunk::Fill { start: 0, size: 8192, value: 1 });
725            let mut res = init_vec.clone();
726            add_sparse_chunk(&mut res, Chunk::Fill { start: 0, size: 8192, value: 1 }).unwrap();
727            assert_eq!(1, res.len());
728            assert_eq!(Chunk::Fill { start: 0, size: 16384, value: 1 }, res[0]);
729        }
730
731        // Test don't merge on different value.
732        {
733            let mut init_vec = Vec::<Chunk>::new();
734            init_vec.push(Chunk::Fill { start: 0, size: 4096, value: 1 });
735            let mut res = init_vec.clone();
736            add_sparse_chunk(&mut res, Chunk::Fill { start: 0, size: 4096, value: 2 }).unwrap();
737            assert_ne!(res, init_vec);
738            assert_eq!(2, res.len());
739            assert_eq!(
740                res,
741                [
742                    Chunk::Fill { start: 0, size: 4096, value: 1 },
743                    Chunk::Fill { start: 0, size: 4096, value: 2 }
744                ]
745            );
746        }
747
748        // Test don't merge on different type.
749        {
750            let mut init_vec = Vec::<Chunk>::new();
751            init_vec.push(Chunk::Fill { start: 0, size: 4096, value: 2 });
752            let mut res = init_vec.clone();
753            add_sparse_chunk(&mut res, Chunk::DontCare { start: 0, size: 4096 }).unwrap();
754            assert_ne!(res, init_vec);
755            assert_eq!(2, res.len());
756            assert_eq!(
757                res,
758                [
759                    Chunk::Fill { start: 0, size: 4096, value: 2 },
760                    Chunk::DontCare { start: 0, size: 4096 }
761                ]
762            );
763        }
764
765        // Test don't merge when too large.
766        {
767            let mut init_vec = Vec::<Chunk>::new();
768            init_vec.push(Chunk::Fill { start: 0, size: 4096, value: 1 });
769            let mut res = init_vec.clone();
770            add_sparse_chunk(&mut res, Chunk::Fill { start: 0, size: u32::MAX - 4095, value: 1 })
771                .unwrap();
772            assert_ne!(res, init_vec);
773            assert_eq!(2, res.len());
774            assert_eq!(
775                res,
776                [
777                    Chunk::Fill { start: 0, size: 4096, value: 1 },
778                    Chunk::Fill { start: 0, size: u32::MAX - 4095, value: 1 }
779                ]
780            );
781        }
782    }
783
784    #[test]
785    fn test_add_sparse_chunk_dont_care() {
786        // Test they merge.
787        {
788            let mut init_vec = Vec::<Chunk>::new();
789            init_vec.push(Chunk::DontCare { start: 0, size: 4096 });
790            let mut res = init_vec.clone();
791            add_sparse_chunk(&mut res, Chunk::DontCare { start: 0, size: 4096 }).unwrap();
792            assert_eq!(1, res.len());
793            assert_eq!(Chunk::DontCare { start: 0, size: 8192 }, res[0]);
794        }
795
796        // Test they don't merge on different type.
797        {
798            let mut init_vec = Vec::<Chunk>::new();
799            init_vec.push(Chunk::DontCare { start: 0, size: 4096 });
800            let mut res = init_vec.clone();
801            add_sparse_chunk(&mut res, Chunk::Fill { start: 0, size: 4096, value: 1 }).unwrap();
802            assert_eq!(2, res.len());
803            assert_eq!(
804                res,
805                [
806                    Chunk::DontCare { start: 0, size: 4096 },
807                    Chunk::Fill { start: 0, size: 4096, value: 1 }
808                ]
809            );
810        }
811
812        // Test they don't merge when too large.
813        {
814            let mut init_vec = Vec::<Chunk>::new();
815            init_vec.push(Chunk::DontCare { start: 0, size: 4096 });
816            let mut res = init_vec.clone();
817            add_sparse_chunk(&mut res, Chunk::DontCare { start: 0, size: u32::MAX - 4095 })
818                .unwrap();
819            assert_eq!(2, res.len());
820            assert_eq!(
821                res,
822                [
823                    Chunk::DontCare { start: 0, size: 4096 },
824                    Chunk::DontCare { start: 0, size: u32::MAX - 4095 }
825                ]
826            );
827        }
828    }
829
830    #[test]
831    fn test_add_sparse_chunk_raw() {
832        // Test they merge.
833        {
834            let mut init_vec = Vec::<Chunk>::new();
835            init_vec.push(Chunk::Raw { start: 0, size: 12288 });
836            let mut res = init_vec.clone();
837            add_sparse_chunk(&mut res, Chunk::Raw { start: 0, size: 16384 }).unwrap();
838            assert_eq!(1, res.len());
839            assert_eq!(Chunk::Raw { start: 0, size: 28672 }, res[0]);
840        }
841
842        // Test they don't merge on different type.
843        {
844            let mut init_vec = Vec::<Chunk>::new();
845            init_vec.push(Chunk::Raw { start: 0, size: 12288 });
846            let mut res = init_vec.clone();
847            add_sparse_chunk(&mut res, Chunk::Fill { start: 3, size: 8192, value: 1 }).unwrap();
848            assert_eq!(2, res.len());
849            assert_eq!(
850                res,
851                [
852                    Chunk::Raw { start: 0, size: 12288 },
853                    Chunk::Fill { start: 3, size: 8192, value: 1 }
854                ]
855            );
856        }
857
858        // Test they don't merge when too large.
859        {
860            let mut init_vec = Vec::<Chunk>::new();
861            init_vec.push(Chunk::Raw { start: 0, size: 4096 });
862            let mut res = init_vec.clone();
863            add_sparse_chunk(&mut res, Chunk::Raw { start: 0, size: u32::MAX - 4095 }).unwrap();
864            assert_eq!(2, res.len());
865            assert_eq!(
866                res,
867                [
868                    Chunk::Raw { start: 0, size: 4096 },
869                    Chunk::Raw { start: 0, size: u32::MAX - 4095 }
870                ]
871            );
872        }
873    }
874
875    #[test]
876    fn test_add_sparse_chunk_crc32() {
877        // Test they don't merge on same type (Crc32 is special).
878        {
879            let mut init_vec = Vec::<Chunk>::new();
880            init_vec.push(Chunk::Crc32 { checksum: 1234 });
881            let mut res = init_vec.clone();
882            add_sparse_chunk(&mut res, Chunk::Crc32 { checksum: 2345 }).unwrap();
883            assert_eq!(2, res.len());
884            assert_eq!(res, [Chunk::Crc32 { checksum: 1234 }, Chunk::Crc32 { checksum: 2345 }]);
885        }
886
887        // Test they don't merge on different type.
888        {
889            let mut init_vec = Vec::<Chunk>::new();
890            init_vec.push(Chunk::Crc32 { checksum: 1234 });
891            let mut res = init_vec.clone();
892            add_sparse_chunk(&mut res, Chunk::Fill { start: 0, size: 4096, value: 1 }).unwrap();
893            assert_eq!(2, res.len());
894            assert_eq!(
895                res,
896                [Chunk::Crc32 { checksum: 1234 }, Chunk::Fill { start: 0, size: 4096, value: 1 }]
897            );
898        }
899    }
900
901    ////////////////////////////////////////////////////////////////////////////
902    // Integration
903    //
904
905    #[test]
906    fn test_roundtrip() {
907        let tmpdir = TempDir::new().unwrap();
908
909        // Generate a large temporary file
910        let (mut file, _temp_path) = NamedTempFile::new_in(&tmpdir).unwrap().into_parts();
911        let mut rng = SmallRng::from_entropy();
912        let mut buf = Vec::<u8>::new();
913        buf.resize(1 * 4096, 0);
914        rng.fill_bytes(&mut buf);
915        file.write_all(&buf).unwrap();
916        file.flush().unwrap();
917        file.seek(SeekFrom::Start(0)).unwrap();
918        let content_size = buf.len();
919
920        // build a sparse file
921        let mut sparse_file = NamedTempFile::new_in(&tmpdir).unwrap().into_file();
922        SparseImageBuilder::new()
923            .add_chunk(DataSource::Buffer(Box::new([0xffu8; 8192])))
924            .add_chunk(DataSource::Reader { reader: Box::new(file), size: content_size as u64 })
925            .add_chunk(DataSource::Fill(0xaaaa_aaaau32, 1024))
926            .add_chunk(DataSource::Skip(16384))
927            .build(&mut sparse_file)
928            .expect("Build sparse image failed");
929        sparse_file.seek(SeekFrom::Start(0)).unwrap();
930
931        let mut orig_file = NamedTempFile::new_in(&tmpdir).unwrap().into_file();
932        unsparse(&mut sparse_file, &mut orig_file).expect("unsparse failed");
933        orig_file.seek(SeekFrom::Start(0)).unwrap();
934
935        let mut unsparsed_bytes = vec![];
936        orig_file.read_to_end(&mut unsparsed_bytes).expect("Failed to read unsparsed image");
937        assert_eq!(unsparsed_bytes.len(), 8192 + 20480 + content_size);
938        assert_eq!(&unsparsed_bytes[..8192], &[0xffu8; 8192]);
939        assert_eq!(&unsparsed_bytes[8192..8192 + content_size], &buf[..]);
940        assert_eq!(&unsparsed_bytes[8192 + content_size..12288 + content_size], &[0xaau8; 4096]);
941        assert_eq!(&unsparsed_bytes[12288 + content_size..], &[0u8; 16384]);
942    }
943
944    #[test]
945    /// test_with_simg2img is a "round trip" test that does the following
946    ///
947    /// 1. Generates a pseudorandom temporary file
948    /// 2. Builds sparse files out of it
949    /// 3. Uses the android tool simg2img to take the sparse files and generate
950    ///    the "original" image file out of them.
951    /// 4. Asserts the originally created file and the one created by simg2img
952    ///    have binary equivalent contents.
953    ///
954    /// This gives us a reasonable expectation of correctness given that the
955    /// Android-provided sparse tools are able to interpret our sparse images.
956    #[cfg(target_os = "linux")]
957    fn test_with_simg2img() {
958        let simg2img_path = Path::new("./host_x64/test_data/storage/sparse/simg2img");
959        assert!(
960            Path::exists(simg2img_path),
961            "simg2img binary must exist at {}",
962            simg2img_path.display()
963        );
964
965        let tmpdir = TempDir::new().unwrap();
966
967        // Generate a large temporary file
968        let (mut file, temp_path) = NamedTempFile::new_in(&tmpdir).unwrap().into_parts();
969        let mut rng = SmallRng::from_entropy();
970        let mut buf = Vec::<u8>::new();
971        buf.resize(50 * 4096, 0);
972        rng.fill_bytes(&mut buf);
973        file.write_all(&buf).unwrap();
974        file.flush().unwrap();
975        file.seek(SeekFrom::Start(0)).unwrap();
976
977        // build a sparse file
978        let files = build_sparse_files(
979            "test",
980            temp_path.to_path_buf().to_str().expect("Should succeed"),
981            tmpdir.path(),
982            4096 * 2,
983        )
984        .unwrap();
985
986        let mut simg2img_output = tmpdir.path().to_path_buf();
987        simg2img_output.push("output");
988
989        let mut simg2img = Command::new(simg2img_path)
990            .args(&files[..])
991            .arg(&simg2img_output)
992            .stdout(Stdio::piped())
993            .stderr(Stdio::piped())
994            .spawn()
995            .expect("Failed to spawn simg2img");
996        let res = simg2img.wait().expect("Failed to wait on simg2img");
997        assert!(res.success(), "simg2img did not succeed");
998        let mut simg2img_stdout = simg2img.stdout.take().expect("Get stdout from simg2img");
999        let mut simg2img_stderr = simg2img.stderr.take().expect("Get stderr from simg2img");
1000
1001        let mut stdout = String::new();
1002        simg2img_stdout.read_to_string(&mut stdout).expect("Reading simg2img stdout");
1003        assert_eq!(stdout, "");
1004
1005        let mut stderr = String::new();
1006        simg2img_stderr.read_to_string(&mut stderr).expect("Reading simg2img stderr");
1007        assert_eq!(stderr, "");
1008
1009        let simg2img_output_bytes =
1010            std::fs::read(simg2img_output).expect("Failed to read simg2img output");
1011
1012        assert_eq!(
1013            buf, simg2img_output_bytes,
1014            "Output from simg2img should match our generated file"
1015        );
1016    }
1017}