1use anyhow::{Context, Error, anyhow};
6use delivery_blob::Type1Blob;
7pub use delivery_blob::compression::CompressionAlgorithm;
8use delivery_blob::compression::{ChunkedArchive, ChunkedArchiveOptions};
9use fuchsia_async as fasync;
10use fuchsia_merkle::{Hash, MerkleRootBuilder};
11use futures::{SinkExt as _, StreamExt as _, TryStreamExt as _, try_join};
12use fxfs::blob_metadata::{BlobFormat, BlobMetadata, BlobMetadataLeafHashCollector};
13use fxfs::errors::FxfsError;
14use fxfs::filesystem::{FxFilesystemBuilder, OpenFxFilesystem};
15use fxfs::object_handle::{ObjectHandle, ReadObjectHandle, WriteBytes};
16use fxfs::object_store::directory::Directory;
17use fxfs::object_store::journal::RESERVED_SPACE;
18use fxfs::object_store::journal::super_block::SuperBlockInstance;
19use fxfs::object_store::transaction::{LockKey, lock_keys};
20use fxfs::object_store::volume::root_volume;
21use fxfs::object_store::{
22 DataObjectHandle, DirectWriter, HandleOptions, NewChildStoreOptions, ObjectStore, StoreOptions,
23};
24use rayon::ThreadPoolBuilder;
25use rayon::prelude::*;
26use serde::{Deserialize, Serialize};
27use sparse::unsparse;
28use std::fs;
29use std::io::{BufWriter, Cursor, Read, Write};
30use std::path::PathBuf;
31use storage_device::DeviceHolder;
32use storage_device::file_backed_device::FileBackedDevice;
33
/// Name of the volume inside the generated image that holds the blobs.
pub const BLOB_VOLUME_NAME: &str = "blob";

/// Block size, in bytes, used for the generated/parsed Fxfs image.
const BLOCK_SIZE: u32 = 4096;

/// Number of filesystem blocks read per iteration when extracting blob
/// contents (multiplied by `handle.block_size()` in `extract_blobs`).
const READ_BUFFER_SIZE: u64 = 512;
39
/// One entry of the JSON manifest emitted alongside the generated image,
/// describing a single installed blob. Field names appear verbatim in the
/// serialized JSON, so they must not be renamed.
#[derive(Clone, Debug, Deserialize, PartialEq, Serialize)]
struct BlobsJsonOutputEntry {
    // Path of the source file the blob was built from.
    source_path: String,
    // Merkle root of the blob contents, as a hex string.
    merkle: String,
    // Uncompressed content length in bytes.
    bytes: usize,
    // Bytes allocated for the blob in the image.
    size: u64,
    // Uncompressed content length again (populated with the same value as
    // `bytes`; presumably kept for blobfs manifest compatibility — TODO confirm).
    file_size: usize,
    // On-disk size of the (possibly compressed) data attribute.
    compressed_file_size: u64,
    // Serialized size of the blob's metadata (merkle leaves + format).
    merkle_tree_size: usize,
    // Same value as `size`; legacy field name.
    used_space_in_blobfs: u64,
}

/// The full manifest: one entry per installed blob.
type BlobsJsonOutput = Vec<BlobsJsonOutputEntry>;
54
/// Builds an Fxfs image at `output_image_path` containing `blobs` and writes a
/// JSON manifest describing each installed blob to `json_output_path`.
///
/// * `blobs` pairs each blob's expected merkle root with the path of the file
///   holding its contents; installation fails on a hash mismatch.
/// * `target_size`, if `Some` and non-zero, fixes the final image size in
///   bytes; it must be block-aligned and at least one block. Otherwise the
///   image is sized to twice the space actually used (plus journal reserve).
/// * `sparse_output_image_path`, if given, additionally receives a sparse
///   representation of the image.
/// * `compression_algorithm` selects blob compression; `None` stores all
///   blobs uncompressed.
pub async fn make_blob_image(
    output_image_path: &str,
    sparse_output_image_path: Option<&str>,
    blobs: Vec<(Hash, PathBuf)>,
    json_output_path: &str,
    target_size: Option<u64>,
    compression_algorithm: Option<CompressionAlgorithm>,
) -> Result<(), Error> {
    let output_image = std::fs::OpenOptions::new()
        .read(true)
        .write(true)
        .create(true)
        .truncate(true)
        .open(output_image_path)?;

    // `None` collapses to 0, which doubles as the "auto-size" sentinel below.
    let mut target_size = target_size.unwrap_or_default();

    if target_size > 0 && target_size < BLOCK_SIZE as u64 {
        return Err(anyhow!("Size {} is too small", target_size));
    }
    if target_size % BLOCK_SIZE as u64 > 0 {
        return Err(anyhow!("Invalid size {} is not block-aligned", target_size));
    }
    let block_count = if target_size != 0 {
        output_image.set_len(target_size).context("Failed to resize image")?;
        target_size / BLOCK_SIZE as u64
    } else {
        // Auto-sizing: build on a nominally 4 GiB device; the backing file
        // only grows as blocks are actually written, and the final file length
        // is adjusted after the real usage is known.
        const FOUR_GIGS: u64 = 4 * 1024 * 1024 * 1024;
        FOUR_GIGS / BLOCK_SIZE as u64
    };

    let device = DeviceHolder::new(FileBackedDevice::new_with_block_count(
        output_image,
        BLOCK_SIZE,
        block_count,
    ));
    let fxblob = FxBlobBuilder::new(device).await?;
    let blobs_json = install_blobs(&fxblob, blobs, compression_algorithm).await.map_err(|e| {
        // Translate an out-of-space failure into an actionable message when
        // the caller pinned the image size.
        if target_size != 0 && FxfsError::NoSpace.matches(&e) {
            e.context(format!(
                "Configured image size {} is too small to fit the base system image.",
                target_size
            ))
        } else {
            e
        }
    })?;
    // `.1` is the highest allocated byte offset, i.e. the space actually used.
    let actual_size = fxblob.finalize().await?.1;

    if target_size == 0 {
        // Auto-size: leave headroom by doubling used space plus the journal's
        // reserved space.
        target_size = (actual_size + RESERVED_SPACE) * 2;
    }

    if let Some(sparse_path) = sparse_output_image_path {
        create_sparse_image(sparse_path, output_image_path, actual_size, target_size, BLOCK_SIZE)
            .context("Failed to create sparse image")?;
    }

    if target_size != actual_size {
        debug_assert!(target_size > actual_size);
        // Reopen: the original handle was consumed by the FileBackedDevice.
        let output_image =
            std::fs::OpenOptions::new().read(true).write(true).open(output_image_path)?;
        output_image.set_len(target_size).context("Failed to resize image")?;
    }

    let mut json_output = BufWriter::new(
        std::fs::File::create(json_output_path).context("Failed to create JSON output file")?,
    );
    serde_json::to_writer_pretty(&mut json_output, &blobs_json)
        .context("Failed to serialize to JSON output")?;

    Ok(())
}
145
146fn create_sparse_image(
147 sparse_output_image_path: &str,
148 image_path: &str,
149 actual_size: u64,
150 target_size: u64,
151 block_size: u32,
152) -> Result<(), Error> {
153 let image = std::fs::OpenOptions::new()
154 .read(true)
155 .open(image_path)
156 .with_context(|| format!("Failed to open {:?}", image_path))?;
157 let mut output = std::fs::OpenOptions::new()
158 .read(true)
159 .write(true)
160 .create(true)
161 .truncate(true)
162 .open(sparse_output_image_path)
163 .with_context(|| format!("Failed to create {:?}", sparse_output_image_path))?;
164 sparse::builder::SparseImageBuilder::new()
165 .set_block_size(block_size)
166 .add_source(sparse::builder::DataSource::Reader {
167 reader: Box::new(image),
168 size: actual_size,
169 })
170 .add_source(sparse::builder::DataSource::Skip(target_size - actual_size))
171 .build(&mut output)
172}
173
/// Helper that owns a freshly-formatted Fxfs filesystem and installs blobs
/// into its blob volume.
pub struct FxBlobBuilder {
    // Root directory of the blob volume; blobs are created as child files.
    blob_directory: Directory<ObjectStore>,
    filesystem: OpenFxFilesystem,
}
179
impl FxBlobBuilder {
    /// Formats `device` with a fresh Fxfs filesystem (in image-builder mode,
    /// writing superblock A) and creates an empty volume named
    /// [`BLOB_VOLUME_NAME`] whose root directory will hold the blobs.
    pub async fn new(device: DeviceHolder) -> Result<Self, Error> {
        let filesystem = FxFilesystemBuilder::new()
            .format(true)
            .trim_config(None)
            .image_builder_mode(Some(SuperBlockInstance::A))
            .open(device)
            .await
            .context("Failed to format filesystem")?;
        filesystem.enable_allocations();
        let root_volume = root_volume(filesystem.clone()).await?;
        let vol = root_volume
            .new_volume(BLOB_VOLUME_NAME, NewChildStoreOptions::default())
            .await
            .context("Failed to create volume")?;
        let blob_directory = Directory::open(&vol, vol.root_directory_object_id())
            .await
            .context("Unable to open root blob directory")?;
        Ok(Self { blob_directory, filesystem })
    }

    /// Closes the filesystem and returns the underlying device together with
    /// the highest allocated byte offset (the space actually used).
    pub async fn finalize(self) -> Result<(DeviceHolder, u64), Error> {
        self.filesystem.close().await?;
        let actual_size = self.filesystem.allocator().maximum_offset();
        Ok((self.filesystem.take_device().await, actual_size))
    }

    /// Writes `blob` into the blob volume as a file named after its merkle
    /// hash (contents first, then metadata) and returns the object handle.
    pub async fn install_blob(
        &self,
        blob: &BlobToInstall,
    ) -> Result<DataObjectHandle<ObjectStore>, Error> {
        let handle;
        // Lock the blob directory while creating the child entry so that
        // concurrent installs serialize on directory mutation.
        let keys = lock_keys![LockKey::object(
            self.blob_directory.store().store_object_id(),
            self.blob_directory.object_id(),
        )];
        let mut transaction = self
            .filesystem
            .clone()
            .new_transaction(keys, Default::default())
            .await
            .context("new transaction")?;
        handle = self
            .blob_directory
            .create_child_file_with_options(
                &mut transaction,
                &blob.hash.to_string(),
                // Checksums are skipped for blob payloads; integrity comes
                // from the merkle hash itself.
                HandleOptions { skip_checksums: true, ..Default::default() },
            )
            .await
            .context("create child file")?;
        transaction.commit().await.context("transaction commit")?;

        {
            let mut writer = DirectWriter::new(&handle, Default::default()).await;
            match &blob.data {
                BlobData::Uncompressed(data) => {
                    writer.write_bytes(data).await.context("write blob contents")?;
                }
                // Compressed forms are written chunk by chunk in archive order.
                BlobData::CompressedZstd(archive) | BlobData::CompressedLz4(archive) => {
                    for chunk in archive.chunks() {
                        writer
                            .write_bytes(&chunk.compressed_data)
                            .await
                            .context("write blob contents")?;
                    }
                }
            }
            writer.complete().await.context("flush blob contents")?;
        }

        // Metadata (merkle leaves + format descriptor) is persisted after the
        // payload, attached to the same handle.
        blob.metadata.write_to(&handle).await.context("write blob metadata")?;

        Ok(handle)
    }

    /// Builds a [`BlobToInstall`] from in-memory `data` using this
    /// filesystem's block size.
    pub fn generate_blob(
        &self,
        data: Vec<u8>,
        compression_algorithm: Option<CompressionAlgorithm>,
    ) -> Result<BlobToInstall, Error> {
        BlobToInstall::new(data, self.filesystem.block_size() as usize, compression_algorithm)
    }
}
273
/// On-disk form chosen for a blob's contents.
enum BlobData {
    // Raw bytes, stored verbatim.
    Uncompressed(Vec<u8>),
    // Chunked archive compressed with zstd.
    CompressedZstd(ChunkedArchive),
    // Chunked archive compressed with LZ4.
    CompressedLz4(ChunkedArchive),
}
279
280fn compressed_offsets(chunked_archive: &ChunkedArchive) -> Vec<u64> {
281 let mut offsets = Vec::with_capacity(chunked_archive.chunks().len());
282 let mut offset: u64 = 0;
283 for chunk in chunked_archive.chunks() {
284 offsets.push(offset);
285 offset += chunk.compressed_data.len() as u64;
286 }
287 offsets
288}
289
/// A blob that has been hashed and (possibly) compressed, ready to be written
/// into the image via [`FxBlobBuilder::install_blob`].
pub struct BlobToInstall {
    // Merkle root of the uncompressed contents.
    hash: Hash,
    // Contents in the exact form they will be written to disk.
    data: BlobData,
    // Length of the original, uncompressed contents in bytes.
    uncompressed_size: usize,
    // Metadata (merkle leaves + format) persisted alongside the payload.
    metadata: BlobMetadata,
    // Path the contents were read from, if built via `new_from_file`.
    source: Option<PathBuf>,
}
304
impl BlobToInstall {
    /// Hashes `data`, optionally compresses it, and derives the metadata
    /// needed to install it as a blob. `source` is left as `None`.
    pub fn new(
        data: Vec<u8>,
        fs_block_size: usize,
        compression_algorithm: Option<CompressionAlgorithm>,
    ) -> Result<Self, Error> {
        // Computes both the merkle root and the per-leaf hashes in one pass.
        let (hash, hashes) =
            MerkleRootBuilder::new(BlobMetadataLeafHashCollector::new()).complete(&data);

        let uncompressed_size = data.len();
        // `maybe_compress` may still decide to keep the data uncompressed if
        // compression doesn't save space.
        let data = if let Some(compression_algorithm) = compression_algorithm {
            maybe_compress(data, fs_block_size, compression_algorithm)
        } else {
            BlobData::Uncompressed(data)
        };
        // The metadata's format descriptor must mirror the chosen on-disk form.
        let metadata = match &data {
            BlobData::Uncompressed(_) => {
                BlobMetadata { merkle_leaves: hashes, format: BlobFormat::Uncompressed }
            }
            BlobData::CompressedZstd(chunked_archive) => BlobMetadata {
                merkle_leaves: hashes,
                format: BlobFormat::ChunkedZstd {
                    uncompressed_size: uncompressed_size as u64,
                    chunk_size: chunked_archive.chunk_size() as u64,
                    compressed_offsets: compressed_offsets(&chunked_archive),
                },
            },
            BlobData::CompressedLz4(chunked_archive) => BlobMetadata {
                merkle_leaves: hashes,
                format: BlobFormat::ChunkedLz4 {
                    uncompressed_size: uncompressed_size as u64,
                    chunk_size: chunked_archive.chunk_size() as u64,
                    compressed_offsets: compressed_offsets(&chunked_archive),
                },
            },
        };
        Ok(BlobToInstall { hash, data, uncompressed_size, metadata, source: None })
    }

    /// Reads the contents of `path` and builds a blob from them, recording
    /// `path` as the blob's source.
    pub fn new_from_file(
        path: PathBuf,
        fs_block_size: usize,
        compression_algorithm: Option<CompressionAlgorithm>,
    ) -> Result<Self, Error> {
        let mut data = Vec::new();
        std::fs::File::open(&path)
            .with_context(|| format!("Unable to open `{:?}'", &path))?
            .read_to_end(&mut data)
            .with_context(|| format!("Unable to read contents of `{:?}'", &path))?;
        let blob = Self::new(data, fs_block_size, compression_algorithm)?;
        Ok(Self { source: Some(path), ..blob })
    }

    /// Returns the merkle root of the blob's uncompressed contents.
    pub fn hash(&self) -> Hash {
        self.hash.clone()
    }
}
365
/// Generates blobs from `blobs` on a rayon thread pool (hashing/compression is
/// CPU-bound) while concurrently installing them into `fxblob`, returning one
/// manifest entry per blob. Fails if any computed hash does not match the
/// expected one.
async fn install_blobs(
    fxblob: &FxBlobBuilder,
    blobs: Vec<(Hash, PathBuf)>,
    compression_algorithm: Option<CompressionAlgorithm>,
) -> Result<BlobsJsonOutput, Error> {
    let num_blobs = blobs.len();
    let fs_block_size = fxblob.filesystem.block_size() as usize;
    // Zero-capacity channel: generators hand each finished blob directly to
    // the install side.
    let (tx, rx) = futures::channel::mpsc::channel::<BlobToInstall>(0);
    let num_threads: usize = std::thread::available_parallelism().unwrap().into();
    let thread_pool = ThreadPoolBuilder::new().num_threads(num_threads).build().unwrap();
    let generate = fasync::unblock(move || {
        thread_pool.install(|| {
            blobs.par_iter().try_for_each(|(hash, path)| {
                let blob = BlobToInstall::new_from_file(
                    path.clone(),
                    fs_block_size,
                    compression_algorithm,
                )?;
                // Reject files whose contents don't hash to the expected root.
                if &blob.hash != hash {
                    let calculated_hash = &blob.hash;
                    let path = path.display();
                    return Err(anyhow!(
                        "Hash mismatch for {path}: calculated={calculated_hash}, expected={hash}"
                    ));
                }
                futures::executor::block_on(tx.clone().send(blob))
                    .context("send blob to install task")
            })
        })?;
        // `tx` is dropped here, which terminates the `rx` stream below.
        Ok(())
    });
    const MAX_INSTALL_CONCURRENCY: usize = 10;
    let install = rx
        .map(|blob| install_blob_with_json_output(fxblob, blob))
        .buffer_unordered(MAX_INSTALL_CONCURRENCY)
        .try_collect::<BlobsJsonOutput>();
    let (installed_blobs, _) = try_join!(install, generate)?;
    assert_eq!(installed_blobs.len(), num_blobs);
    Ok(installed_blobs)
}
409
410async fn install_blob_with_json_output(
411 fxblob: &FxBlobBuilder,
412 blob: BlobToInstall,
413) -> Result<BlobsJsonOutputEntry, Error> {
414 let handle = fxblob.install_blob(&blob).await?;
415 let properties = handle.get_properties().await.context("get properties")?;
416 let source_path = blob
417 .source
418 .expect("missing source path")
419 .to_str()
420 .context("blob path to utf8")?
421 .to_string();
422 Ok(BlobsJsonOutputEntry {
423 source_path,
424 merkle: blob.hash.to_string(),
425 bytes: blob.uncompressed_size,
426 size: properties.allocated_size,
427 file_size: blob.uncompressed_size,
428 compressed_file_size: properties.data_attribute_size,
429 merkle_tree_size: blob.metadata.serialized_size().context("blob metadata size")?,
430 used_space_in_blobfs: properties.allocated_size,
431 })
432}
433
434fn maybe_compress(
435 buf: Vec<u8>,
436 filesystem_block_size: usize,
437 compression_algorithm: CompressionAlgorithm,
438) -> BlobData {
439 if buf.len() <= filesystem_block_size {
440 return BlobData::Uncompressed(buf); }
442 let chunked_archive_options = match compression_algorithm {
443 CompressionAlgorithm::Zstd => {
444 Type1Blob::CHUNKED_ARCHIVE_OPTIONS
446 }
447 CompressionAlgorithm::Lz4 => ChunkedArchiveOptions::V3 { compression_algorithm },
448 };
449 let archive =
450 ChunkedArchive::new(&buf, chunked_archive_options).expect("failed to compress data");
451 if archive.compressed_data_size().checked_next_multiple_of(filesystem_block_size).unwrap()
452 >= buf.len()
453 {
454 BlobData::Uncompressed(buf) } else {
456 match compression_algorithm {
457 CompressionAlgorithm::Zstd => BlobData::CompressedZstd(archive),
458 CompressionAlgorithm::Lz4 => BlobData::CompressedLz4(archive),
459 }
460 }
461}
462
/// Extracts every blob from the sparse Fxfs image at `image` into `out_dir`
/// (one file per blob, named by its merkle hash, with contents decompressed).
///
/// `out_dir` is deleted and recreated if it already exists. The input is
/// expected to be in sparse format: it is unsparsed into a temporary file
/// inside `out_dir` before being mounted read-only.
pub async fn extract_blobs(image: PathBuf, out_dir: PathBuf) -> anyhow::Result<()> {
    if out_dir.exists() {
        fs::remove_dir_all(&out_dir).context("Failed to remove output directory")?;
    }
    fs::create_dir_all(&out_dir)?;

    // Unsparse fully in memory, then persist to a temp file that backs the
    // block device below.
    let image_bytes = fs::read(&image)?;
    let mut source = Cursor::new(image_bytes);
    let mut unsparsed = Cursor::new(Vec::<u8>::new());
    let mut non_sparse_image = tempfile::NamedTempFile::new_in(&out_dir)?;
    unsparse(&mut source, &mut unsparsed)?;
    non_sparse_image.write_all(&unsparsed.into_inner())?;

    let device = DeviceHolder::new(FileBackedDevice::new(non_sparse_image.reopen()?, BLOCK_SIZE));
    let fs = FxFilesystemBuilder::new().read_only(true).open(device).await?;
    let vol =
        root_volume(fs.clone()).await?.volume(BLOB_VOLUME_NAME, StoreOptions::default()).await?;
    let root_dir = Directory::open(&vol, vol.root_directory_object_id()).await?;
    let layer_set = root_dir.store().tree().layer_set();
    let mut merger = layer_set.merger();
    let mut iter = root_dir.iter(&mut merger).await?;
    // Reads happen inline while iterating; the CPU-bound decompression is
    // deferred to blocking tasks collected here.
    let blob_extraction_futures = futures::stream::FuturesUnordered::new();

    while let Some((name, object_id, descriptor)) = iter.get() {
        if *descriptor == fxfs::object_store::ObjectDescriptor::File {
            let handle = fxfs::object_store::ObjectStore::open_object(
                root_dir.owner(),
                object_id,
                fxfs::object_store::HandleOptions::default(),
                None,
            )
            .await?;

            // Each blob is written to a file named after its merkle hash.
            let out_path = out_dir.join(name);
            let mut file = std::fs::File::create(&out_path)?;
            let mut read_buf = Vec::new();
            let mut offset = 0;
            let mut buf =
                handle.allocate_buffer((handle.block_size() * READ_BUFFER_SIZE) as usize).await;
            // Accumulate the entire (possibly compressed) payload in memory.
            loop {
                let bytes = handle.read(offset, buf.as_mut()).await?;
                if bytes == 0 {
                    break;
                }
                offset += bytes as u64;
                read_buf.write_all(&buf.as_slice()[..bytes])?;
            }

            // The metadata tells us which on-disk format the payload uses.
            let metadata = BlobMetadata::read_from(&handle).await?;
            blob_extraction_futures.push(fasync::unblock(move || -> Result<(), Error> {
                match metadata.format {
                    BlobFormat::ChunkedZstd {
                        uncompressed_size,
                        compressed_offsets,
                        chunk_size,
                    } => decompress_blob(
                        &read_buf,
                        uncompressed_size,
                        compressed_offsets,
                        chunk_size,
                        CompressionAlgorithm::Zstd,
                        &mut file,
                    ),
                    BlobFormat::ChunkedLz4 {
                        uncompressed_size,
                        compressed_offsets,
                        chunk_size,
                    } => decompress_blob(
                        &read_buf,
                        uncompressed_size,
                        compressed_offsets,
                        chunk_size,
                        CompressionAlgorithm::Lz4,
                        &mut file,
                    ),
                    BlobFormat::Uncompressed => {
                        file.write_all(&read_buf)?;
                        Ok(())
                    }
                }
            }));
        }
        iter.advance().await?;
    }
    blob_extraction_futures.try_collect::<()>().await?;
    Ok(())
}
554
555fn decompress_blob(
556 blob_data: &[u8],
557 uncompressed_size: u64,
558 compressed_offsets: Vec<u64>,
559 chunk_size: u64,
560 compression_algorithm: CompressionAlgorithm,
561 out: &mut std::fs::File,
562) -> Result<(), Error> {
563 let mut decompressor = compression_algorithm.decompressor();
564 let mut buf = vec![0; chunk_size as usize];
565 let mut total_decompressed_size = 0;
566 for i in 0..compressed_offsets.len() {
567 let start_offset = compressed_offsets[i] as usize;
568 let end_offset = if i + 1 == compressed_offsets.len() {
569 blob_data.len()
570 } else {
571 compressed_offsets[i + 1] as usize
572 };
573 let decompressed_size =
574 decompressor.decompress_into(&blob_data[start_offset..end_offset], &mut buf, i)?;
575 total_decompressed_size += decompressed_size;
576 out.write_all(&buf[..decompressed_size])?;
577 }
578 if total_decompressed_size != uncompressed_size as usize {
579 Err(anyhow!(
580 "Decompressed size does not match expected size {} {}",
581 total_decompressed_size,
582 uncompressed_size
583 ))
584 } else {
585 Ok(())
586 }
587}
588
589#[cfg(test)]
590mod tests {
591 use super::{BlobsJsonOutput, BlobsJsonOutputEntry, extract_blobs, make_blob_image};
592 use assert_matches::assert_matches;
593 use delivery_blob::compression::CompressionAlgorithm;
594 use fuchsia_async as fasync;
595 use fxfs::filesystem::FxFilesystem;
596 use fxfs::object_store::StoreOptions;
597 use fxfs::object_store::directory::Directory;
598 use fxfs::object_store::volume::root_volume;
599 use sparse::reader::SparseReader;
600 use std::fs::File;
601 use std::io::{Seek as _, SeekFrom, Write};
602 use std::path::Path;
603 use std::str::from_utf8;
604 use storage_device::DeviceHolder;
605 use storage_device::file_backed_device::FileBackedDevice;
606 use tempfile::TempDir;
607
    // Round trip: build a sparse image containing one zstd-compressed blob,
    // extract it, and check the decompressed file has the original length.
    #[fasync::run(10, test)]
    async fn test_extract_blobs_zstd() {
        let tmp = TempDir::new().unwrap();
        let dir = tmp.path();

        let input_blob_path = dir.join("input.txt");
        let image_path = dir.join("fxfs1.blk");
        let sparse_path = dir.join("fxfs1.sparse.blk");
        let out_dir = dir.join("extracted_out");

        // Highly repetitive contents so the blob compresses well.
        let data = "C".repeat(128 * 1024);
        std::fs::write(&input_blob_path, &data).unwrap();

        let merkle_hash = fuchsia_merkle::root_from_slice(data.as_bytes());

        make_blob_image(
            image_path.to_str().unwrap(),
            Some(sparse_path.to_str().unwrap()),
            vec![(merkle_hash, input_blob_path.clone())],
            dir.join("blobs1.json").to_str().unwrap(),
            None,
            Some(CompressionAlgorithm::Zstd),
        )
        .await
        .expect("make_blob_image failed");

        extract_blobs(sparse_path, out_dir.clone())
            .await
            .expect("Extraction failed inside extract_blobs");

        let mut extracted_files = std::fs::read_dir(&out_dir).expect("out_dir should exist");
        let first_entry = extracted_files
            .next()
            .expect("No files were extracted!")
            .expect("Failed to read directory entry");

        let extracted_blob_path = first_entry.path();
        let final_len = std::fs::metadata(&extracted_blob_path).unwrap().len();

        assert_eq!(
            final_len,
            data.len() as u64,
            "Decompressed data size does not match original size",
        );
    }
653
    // Same round trip as `test_extract_blobs_zstd`, but with LZ4 compression.
    #[fasync::run(10, test)]
    async fn test_extract_blobs_lz4() {
        let tmp = TempDir::new().unwrap();
        let dir = tmp.path();

        let input_blob_path = dir.join("input.txt");
        let image_path = dir.join("fxfs1.blk");
        let sparse_path = dir.join("fxfs1.sparse.blk");
        let out_dir = dir.join("extracted_out");

        // Highly repetitive contents so the blob compresses well.
        let data = "C".repeat(128 * 1024);
        std::fs::write(&input_blob_path, &data).unwrap();

        let merkle_hash = fuchsia_merkle::root_from_slice(data.as_bytes());

        make_blob_image(
            image_path.to_str().unwrap(),
            Some(sparse_path.to_str().unwrap()),
            vec![(merkle_hash, input_blob_path.clone())],
            dir.join("blobs1.json").to_str().unwrap(),
            None,
            Some(CompressionAlgorithm::Lz4),
        )
        .await
        .expect("make_blob_image failed");

        extract_blobs(sparse_path, out_dir.clone())
            .await
            .expect("Extraction failed inside extract_blobs");

        let mut extracted_files = std::fs::read_dir(&out_dir).expect("out_dir should exist");
        let first_entry = extracted_files
            .next()
            .expect("No files were extracted!")
            .expect("Failed to read directory entry");

        let extracted_blob_path = first_entry.path();
        let final_len = std::fs::metadata(&extracted_blob_path).unwrap().len();

        assert_eq!(
            final_len,
            data.len() as u64,
            "Decompressed data size does not match original size",
        );
    }
699
    // End-to-end: build an image from three blobs, validate the JSON manifest
    // against golden hashes/sizes, then mount both the raw image and the
    // unsparsed sparse image and check they contain the same blob entries.
    #[fasync::run(10, test)]
    async fn test_make_blob_image() {
        let tmp = TempDir::new().unwrap();
        let dir = tmp.path();
        let blobs_in = {
            // Writes `data` to `path` and returns (merkle root, path).
            let write_data = |path, data: &str| {
                let mut file = File::create(&path).unwrap();
                write!(file, "{}", data).unwrap();
                let root = fuchsia_merkle::root_from_slice(data);
                (root, path)
            };
            vec![
                write_data(dir.join("stuff1.txt"), "Goodbye, stranger!"),
                write_data(dir.join("stuff2.txt"), "It's been nice!"),
                // 65_537 bytes: just over one merkle leaf, so this blob gets a
                // non-empty merkle tree.
                write_data(dir.join("stuff3.txt"), from_utf8(&['a' as u8; 65_537]).unwrap()),
            ]
        };

        let dir = tmp.path();
        let output_path = dir.join("fxfs.blk");
        let sparse_path = dir.join("fxfs.sparse.blk");
        let blobs_json_path = dir.join("blobs.json");
        make_blob_image(
            output_path.as_os_str().to_str().unwrap(),
            Some(sparse_path.as_os_str().to_str().unwrap()),
            blobs_in,
            blobs_json_path.as_os_str().to_str().unwrap(),
            None,
            Some(CompressionAlgorithm::Zstd),
        )
        .await
        .expect("make_blob_image failed");

        let mut blobs_json = std::fs::OpenOptions::new()
            .read(true)
            .open(blobs_json_path)
            .expect("Failed to open blob manifest");
        let mut blobs: BlobsJsonOutput =
            serde_json::from_reader(&mut blobs_json).expect("Failed to serialize to JSON output");

        assert_eq!(blobs.len(), 3);
        // Sort for a deterministic order (installation order is concurrent).
        blobs.sort_by_key(|entry| entry.source_path.clone());

        assert_eq!(Path::new(blobs[0].source_path.as_str()), dir.join("stuff1.txt"));
        assert_matches!(
            &blobs[0],
            BlobsJsonOutputEntry {
                merkle,
                bytes: 18,
                size: 4096,
                file_size: 18,
                merkle_tree_size: 0,
                used_space_in_blobfs: 4096,
                ..
            } if merkle == "9a24fe2fb8da617f39d303750bbe23f4e03a8b5f4d52bc90b2e5e9e44daddb3a"
        );
        assert_eq!(Path::new(blobs[1].source_path.as_str()), dir.join("stuff2.txt"));
        assert_matches!(
            &blobs[1],
            BlobsJsonOutputEntry {
                merkle,
                bytes: 15,
                size: 4096,
                file_size: 15,
                merkle_tree_size: 0,
                used_space_in_blobfs: 4096,
                ..
            } if merkle == "deebe5d5a0a42a51a293b511d0368e6f2b4da522ee0f05c6ae728c77d904f916"
        );
        assert_eq!(Path::new(blobs[2].source_path.as_str()), dir.join("stuff3.txt"));
        assert_matches!(
            &blobs[2],
            BlobsJsonOutputEntry {
                merkle,
                bytes: 65537,
                size: 8192,
                file_size: 65537,
                merkle_tree_size: 308,
                used_space_in_blobfs: 8192,
                ..
            } if merkle == "1194c76d2d3b61f29df97a85ede7b2fd2b293b452f53072356e3c5c939c8131d"
        );

        // Unsparse the sparse output so it can be mounted like the raw image.
        let unsparsed_image = {
            let sparse_image = std::fs::OpenOptions::new().read(true).open(sparse_path).unwrap();
            let mut reader = SparseReader::new(sparse_image).expect("Failed to parse sparse image");

            let unsparsed_image_path = dir.join("fxfs.unsparsed.blk");
            let mut unsparsed_image = std::fs::OpenOptions::new()
                .read(true)
                .write(true)
                .create(true)
                .open(unsparsed_image_path)
                .unwrap();

            std::io::copy(&mut reader, &mut unsparsed_image).expect("Failed to unsparse");
            unsparsed_image.seek(SeekFrom::Start(0)).unwrap();
            unsparsed_image
        };

        let orig_image = std::fs::OpenOptions::new()
            .read(true)
            .open(output_path.clone())
            .expect("Failed to open image");

        assert_eq!(unsparsed_image.metadata().unwrap().len(), orig_image.metadata().unwrap().len());

        // Both images must mount and list the same three blob entries.
        for image in [orig_image, unsparsed_image] {
            let device = DeviceHolder::new(FileBackedDevice::new(image, 4096));
            let filesystem = FxFilesystem::open(device).await.unwrap();
            let root_volume = root_volume(filesystem.clone()).await.expect("Opening root volume");
            let vol =
                root_volume.volume("blob", StoreOptions::default()).await.expect("Opening volume");
            let directory = Directory::open(&vol, vol.root_directory_object_id())
                .await
                .expect("Opening root dir");
            let entries = {
                let layer_set = directory.store().tree().layer_set();
                let mut merger = layer_set.merger();
                let mut iter = directory.iter(&mut merger).await.expect("iter failed");
                let mut entries = vec![];
                while let Some((name, _, _)) = iter.get() {
                    entries.push(name.to_string());
                    iter.advance().await.expect("advance failed");
                }
                entries
            };
            assert_eq!(
                &entries[..],
                &[
                    "1194c76d2d3b61f29df97a85ede7b2fd2b293b452f53072356e3c5c939c8131d",
                    "9a24fe2fb8da617f39d303750bbe23f4e03a8b5f4d52bc90b2e5e9e44daddb3a",
                    "deebe5d5a0a42a51a293b511d0368e6f2b4da522ee0f05c6ae728c77d904f916",
                ]
            );
        }
    }
841
    // Builds the same (highly compressible) blob into two images — one with
    // zstd compression, one with compression disabled — and checks the
    // compressed image ends up smaller.
    #[fasync::run(10, test)]
    async fn test_make_uncompressed_blob_image() {
        let tmp = TempDir::new().unwrap();
        let dir = tmp.path();
        let path = dir.join("large_blob.txt");
        let mut file = File::create(&path).unwrap();
        let data = vec![0xabu8; 32 * 1024 * 1024];
        file.write_all(&data).unwrap();
        let root = fuchsia_merkle::root_from_slice(&data);
        let blobs_in = vec![(root, path)];

        let compressed_path = dir.join("fxfs-compressed.blk");
        let blobs_json_path = dir.join("blobs.json");
        make_blob_image(
            compressed_path.as_os_str().to_str().unwrap(),
            None,
            blobs_in.clone(),
            blobs_json_path.as_os_str().to_str().unwrap(),
            None,
            Some(CompressionAlgorithm::Zstd),
        )
        .await
        .expect("make_blob_image failed");

        let uncompressed_path = dir.join("fxfs-uncompressed.blk");
        make_blob_image(
            uncompressed_path.as_os_str().to_str().unwrap(),
            None,
            blobs_in,
            blobs_json_path.as_os_str().to_str().unwrap(),
            None,
            None,
        )
        .await
        .expect("make_blob_image failed");

        assert!(
            std::fs::metadata(compressed_path).unwrap().len()
                < std::fs::metadata(uncompressed_path).unwrap().len()
        )
    }
883
884 #[fasync::run(10, test)]
885 async fn test_make_blob_image_with_target_size() {
886 const TARGET_SIZE: u64 = 200 * 1024 * 1024;
887 let tmp = TempDir::new().unwrap();
888 let dir = tmp.path();
889 let path = dir.join("large_blob.txt");
890 let mut file = File::create(&path).unwrap();
891 let data = vec![0xabu8; 8 * 1024 * 1024];
892 file.write_all(&data).unwrap();
893 let root = fuchsia_merkle::root_from_slice(&data);
894 let blobs_in = vec![(root, path)];
895
896 let image_path = dir.join("fxfs.blk");
897 let sparse_image_path = dir.join("fxfs.sparse.blk");
898 let blobs_json_path = dir.join("blobs.json");
899 make_blob_image(
900 image_path.as_os_str().to_str().unwrap(),
901 Some(sparse_image_path.as_os_str().to_str().unwrap()),
902 blobs_in.clone(),
903 blobs_json_path.as_os_str().to_str().unwrap(),
904 Some(200 * 1024 * 1024),
905 Some(CompressionAlgorithm::Zstd),
906 )
907 .await
908 .expect("make_blob_image failed");
909
910 let image_size = std::fs::metadata(image_path).unwrap().len();
913 let sparse_image_size = std::fs::metadata(sparse_image_path).unwrap().len();
914 assert_eq!(image_size, TARGET_SIZE);
915 assert!(sparse_image_size < TARGET_SIZE, "Sparse image size: {sparse_image_size}");
916 }
917}