1use anyhow::{Context, Error, anyhow};
6use delivery_blob::Type1Blob;
7pub use delivery_blob::compression::CompressionAlgorithm;
8use delivery_blob::compression::{ChunkedArchive, ChunkedArchiveOptions};
9use fuchsia_async as fasync;
10use fuchsia_merkle::{Hash, MerkleRootBuilder};
11use futures::{SinkExt as _, StreamExt as _, TryStreamExt as _, try_join};
12use fxfs::blob_metadata::{BlobFormat, BlobMetadata, BlobMetadataLeafHashCollector};
13use fxfs::errors::FxfsError;
14use fxfs::filesystem::{FxFilesystemBuilder, OpenFxFilesystem};
15use fxfs::object_handle::{ObjectHandle, ReadObjectHandle, WriteBytes};
16use fxfs::object_store::directory::Directory;
17use fxfs::object_store::journal::RESERVED_SPACE;
18use fxfs::object_store::journal::super_block::SuperBlockInstance;
19use fxfs::object_store::transaction::{LockKey, lock_keys};
20use fxfs::object_store::volume::root_volume;
21use fxfs::object_store::{
22 DataObjectHandle, DirectWriter, HandleOptions, NewChildStoreOptions, ObjectStore, StoreOptions,
23};
24use rayon::ThreadPoolBuilder;
25use rayon::prelude::*;
26use serde::{Deserialize, Serialize};
27use sparse::unsparse;
28use std::fs;
29use std::io::{BufWriter, Read, Write};
30use std::path::PathBuf;
31use storage_device::DeviceHolder;
32use storage_device::file_backed_device::FileBackedDevice;
33
/// Name of the fxfs volume into which blobs are installed.
pub const BLOB_VOLUME_NAME: &str = "blob";

/// Block size, in bytes, of the file-backed device the image is built on.
const BLOCK_SIZE: u32 = 4096;

/// Size of the read buffer used when extracting blobs, in device blocks
/// (multiplied by `handle.block_size()` at allocation time).
const READ_BUFFER_SIZE: u64 = 512;
39
/// A single entry in the JSON blob manifest emitted by [`make_blob_image`].
///
/// See `install_blob_with_json_output` for how each value is computed.
#[derive(Clone, Debug, Deserialize, PartialEq, Serialize)]
struct BlobsJsonOutputEntry {
    // Path of the file the blob contents were read from.
    source_path: String,
    // Merkle root of the blob, as a hex string.
    merkle: String,
    // Uncompressed size of the blob contents, in bytes.
    bytes: usize,
    // Allocated (on-disk) size of the blob, in bytes.
    size: u64,
    // Same value as `bytes`; presumably kept for manifest compatibility — TODO confirm.
    file_size: usize,
    // Size of the blob's data attribute (the possibly-compressed payload), in bytes.
    compressed_file_size: u64,
    // Serialized size of the blob's metadata, in bytes.
    merkle_tree_size: usize,
    // Same value as `size`; name presumably kept for blobfs-manifest compatibility — TODO confirm.
    used_space_in_blobfs: u64,
}

/// The full JSON manifest: one entry per installed blob.
type BlobsJsonOutput = Vec<BlobsJsonOutputEntry>;
54
/// Builds an fxfs image at `output_image_path` containing `blobs` (pairs of expected
/// merkle root and source file path), writing a JSON manifest describing each installed
/// blob to `json_output_path`.
///
/// * `sparse_output_image_path`: if set, a sparse-format copy of the image is also written here.
/// * `target_size`: desired image size in bytes; must be a multiple of `BLOCK_SIZE`.
///   `None` (or 0) sizes the image automatically.
/// * `compression_algorithm`: if set, blob payloads are compressed with this algorithm
///   when doing so saves space.
pub async fn make_blob_image(
    output_image_path: &str,
    sparse_output_image_path: Option<&str>,
    blobs: Vec<(Hash, PathBuf)>,
    json_output_path: &str,
    target_size: Option<u64>,
    compression_algorithm: Option<CompressionAlgorithm>,
) -> Result<(), Error> {
    let output_image = std::fs::OpenOptions::new()
        .read(true)
        .write(true)
        .create(true)
        .truncate(true)
        .open(output_image_path)?;

    let mut target_size = target_size.unwrap_or_default();

    // An explicit target size must be at least one block and block-aligned.
    if target_size > 0 && target_size < BLOCK_SIZE as u64 {
        return Err(anyhow!("Size {} is too small", target_size));
    }
    if target_size % BLOCK_SIZE as u64 > 0 {
        return Err(anyhow!("Invalid size {} is not block-aligned", target_size));
    }
    let block_count = if target_size != 0 {
        output_image.set_len(target_size).context("Failed to resize image")?;
        target_size / BLOCK_SIZE as u64
    } else {
        // No explicit size: build on a generous 4 GiB device; the file is resized to
        // its final size below.
        const FOUR_GIGS: u64 = 4 * 1024 * 1024 * 1024;
        FOUR_GIGS / BLOCK_SIZE as u64
    };

    let device = DeviceHolder::new(FileBackedDevice::new_with_block_count(
        output_image,
        BLOCK_SIZE,
        block_count,
    ));
    let fxblob = FxBlobBuilder::new(device).await?;
    let blobs_json = install_blobs(&fxblob, blobs, compression_algorithm).await.map_err(|e| {
        // Running out of space with an explicit size gets a friendlier explanation.
        if target_size != 0 && FxfsError::NoSpace.matches(&e) {
            e.context(format!(
                "Configured image size {} is too small to fit the base system image.",
                target_size
            ))
        } else {
            e
        }
    })?;
    let actual_size = fxblob.finalize().await?.1;

    if target_size == 0 {
        // Auto-size: leave headroom — twice the used space plus the journal reservation.
        target_size = (actual_size + RESERVED_SPACE) * 2;
    }

    // The sparse image is created from the used prefix of the raw image, before padding.
    if let Some(sparse_path) = sparse_output_image_path {
        create_sparse_image(sparse_path, output_image_path, actual_size, target_size, BLOCK_SIZE)
            .context("Failed to create sparse image")?;
    }

    // Pad (or shrink, in the auto-sized case) the raw image to the target size.
    if target_size != actual_size {
        debug_assert!(target_size > actual_size);
        let output_image =
            std::fs::OpenOptions::new().read(true).write(true).open(output_image_path)?;
        output_image.set_len(target_size).context("Failed to resize image")?;
    }

    let mut json_output = BufWriter::new(
        std::fs::File::create(json_output_path).context("Failed to create JSON output file")?,
    );
    serde_json::to_writer_pretty(&mut json_output, &blobs_json)
        .context("Failed to serialize to JSON output")?;

    Ok(())
}
145
146fn create_sparse_image(
147 sparse_output_image_path: &str,
148 image_path: &str,
149 actual_size: u64,
150 target_size: u64,
151 block_size: u32,
152) -> Result<(), Error> {
153 let image = std::fs::OpenOptions::new()
154 .read(true)
155 .open(image_path)
156 .with_context(|| format!("Failed to open {:?}", image_path))?;
157 let mut output = std::fs::OpenOptions::new()
158 .read(true)
159 .write(true)
160 .create(true)
161 .truncate(true)
162 .open(sparse_output_image_path)
163 .with_context(|| format!("Failed to create {:?}", sparse_output_image_path))?;
164 sparse::builder::SparseImageBuilder::new()
165 .set_block_size(block_size)
166 .add_source(sparse::builder::DataSource::Reader {
167 reader: Box::new(image),
168 size: actual_size,
169 })
170 .add_source(sparse::builder::DataSource::Skip(target_size - actual_size))
171 .build(&mut output)
172}
173
/// Helper for building an fxfs image containing a blob volume.
pub struct FxBlobBuilder {
    // Root directory of the blob volume; blobs are created as children of this directory.
    blob_directory: Directory<ObjectStore>,
    filesystem: OpenFxFilesystem,
}
179
180impl FxBlobBuilder {
181 pub async fn new(device: DeviceHolder) -> Result<Self, Error> {
183 let filesystem = FxFilesystemBuilder::new()
184 .format(true)
185 .trim_config(None)
186 .image_builder_mode(Some(SuperBlockInstance::A))
187 .open(device)
188 .await
189 .context("Failed to format filesystem")?;
190 filesystem.enable_allocations();
191 let root_volume = root_volume(filesystem.clone()).await?;
192 let vol = root_volume
193 .new_volume(BLOB_VOLUME_NAME, NewChildStoreOptions::default())
194 .await
195 .context("Failed to create volume")?;
196 let blob_directory = Directory::open(&vol, vol.root_directory_object_id())
197 .await
198 .context("Unable to open root blob directory")?;
199 Ok(Self { blob_directory, filesystem })
200 }
201
202 pub async fn finalize(self) -> Result<(DeviceHolder, u64), Error> {
206 self.filesystem.close().await?;
207 let actual_size = self.filesystem.allocator().maximum_offset();
208 Ok((self.filesystem.take_device().await, actual_size))
209 }
210
211 pub async fn install_blob(
213 &self,
214 blob: &BlobToInstall,
215 ) -> Result<DataObjectHandle<ObjectStore>, Error> {
216 let handle;
217 let keys = lock_keys![LockKey::object(
218 self.blob_directory.store().store_object_id(),
219 self.blob_directory.object_id(),
220 )];
221 let mut transaction = self
222 .filesystem
223 .clone()
224 .new_transaction(keys, Default::default())
225 .await
226 .context("new transaction")?;
227 handle = self
228 .blob_directory
229 .create_child_file_with_options(
230 &mut transaction,
231 &blob.hash.to_string(),
232 HandleOptions { skip_checksums: true, ..Default::default() },
234 )
235 .await
236 .context("create child file")?;
237 transaction.commit().await.context("transaction commit")?;
238
239 {
241 let mut writer = DirectWriter::new(&handle, Default::default()).await;
242 match &blob.data {
243 BlobData::Uncompressed(data) => {
244 writer.write_bytes(data).await.context("write blob contents")?;
245 }
246 BlobData::CompressedZstd(archive) | BlobData::CompressedLz4(archive) => {
247 for chunk in archive.chunks() {
248 writer
249 .write_bytes(&chunk.compressed_data)
250 .await
251 .context("write blob contents")?;
252 }
253 }
254 }
255 writer.complete().await.context("flush blob contents")?;
256 }
257
258 blob.metadata.write_to(&handle).await.context("write blob metadata")?;
260
261 Ok(handle)
262 }
263
264 pub fn generate_blob(
266 &self,
267 data: Vec<u8>,
268 compression_algorithm: Option<CompressionAlgorithm>,
269 ) -> Result<BlobToInstall, Error> {
270 BlobToInstall::new(data, self.filesystem.block_size() as usize, compression_algorithm)
271 }
272}
273
/// Payload of a blob as it will be written to disk.
enum BlobData {
    // Raw bytes, stored verbatim.
    Uncompressed(Vec<u8>),
    // Zstd-compressed chunked archive.
    CompressedZstd(ChunkedArchive),
    // Lz4-compressed chunked archive.
    CompressedLz4(ChunkedArchive),
}
279
280fn compressed_offsets(chunked_archive: &ChunkedArchive) -> Vec<u64> {
281 let mut offsets = Vec::with_capacity(chunked_archive.chunks().len());
282 let mut offset: u64 = 0;
283 for chunk in chunked_archive.chunks() {
284 offsets.push(offset);
285 offset += chunk.compressed_data.len() as u64;
286 }
287 offsets
288}
289
/// A blob that has been hashed (and possibly compressed) and is ready to be installed.
pub struct BlobToInstall {
    // Merkle root of the uncompressed contents; also used as the on-disk file name.
    hash: Hash,
    // Payload to write (raw or chunked-compressed).
    data: BlobData,
    // Length of the original, uncompressed contents, in bytes.
    uncompressed_size: usize,
    // Metadata (merkle leaves + format descriptor) written alongside the payload.
    metadata: BlobMetadata,
    // Path the contents were read from, when created via `new_from_file`.
    source: Option<PathBuf>,
}
304
305impl BlobToInstall {
306 pub fn new(
308 data: Vec<u8>,
309 fs_block_size: usize,
310 compression_algorithm: Option<CompressionAlgorithm>,
311 ) -> Result<Self, Error> {
312 let (hash, hashes) =
313 MerkleRootBuilder::new(BlobMetadataLeafHashCollector::new()).complete(&data);
314
315 let uncompressed_size = data.len();
316 let data = if let Some(compression_algorithm) = compression_algorithm {
317 maybe_compress(data, fs_block_size, compression_algorithm)
318 } else {
319 BlobData::Uncompressed(data)
320 };
321 let metadata = match &data {
322 BlobData::Uncompressed(_) => {
323 BlobMetadata { merkle_leaves: hashes, format: BlobFormat::Uncompressed }
324 }
325 BlobData::CompressedZstd(chunked_archive) => BlobMetadata {
326 merkle_leaves: hashes,
327 format: BlobFormat::ChunkedZstd {
328 uncompressed_size: uncompressed_size as u64,
329 chunk_size: chunked_archive.chunk_size() as u64,
330 compressed_offsets: compressed_offsets(&chunked_archive),
331 },
332 },
333 BlobData::CompressedLz4(chunked_archive) => BlobMetadata {
334 merkle_leaves: hashes,
335 format: BlobFormat::ChunkedLz4 {
336 uncompressed_size: uncompressed_size as u64,
337 chunk_size: chunked_archive.chunk_size() as u64,
338 compressed_offsets: compressed_offsets(&chunked_archive),
339 },
340 },
341 };
342 Ok(BlobToInstall { hash, data, uncompressed_size, metadata, source: None })
343 }
344
345 pub fn new_from_file(
348 path: PathBuf,
349 fs_block_size: usize,
350 compression_algorithm: Option<CompressionAlgorithm>,
351 ) -> Result<Self, Error> {
352 let mut data = Vec::new();
353 std::fs::File::open(&path)
354 .with_context(|| format!("Unable to open `{:?}'", &path))?
355 .read_to_end(&mut data)
356 .with_context(|| format!("Unable to read contents of `{:?}'", &path))?;
357 let blob = Self::new(data, fs_block_size, compression_algorithm)?;
358 Ok(Self { source: Some(path), ..blob })
359 }
360
361 pub fn hash(&self) -> Hash {
362 self.hash.clone()
363 }
364}
365
/// Reads, hashes, and compresses `blobs` in parallel on a rayon pool, verifies each
/// computed merkle root against its expected hash, and installs the results into
/// `fxblob`, returning one JSON manifest entry per blob.
async fn install_blobs(
    fxblob: &FxBlobBuilder,
    blobs: Vec<(Hash, PathBuf)>,
    compression_algorithm: Option<CompressionAlgorithm>,
) -> Result<BlobsJsonOutput, Error> {
    let num_blobs = blobs.len();
    let fs_block_size = fxblob.filesystem.block_size() as usize;
    // Zero-capacity channel: generation is throttled by installation (each sender
    // clone still gets one slot — see futures mpsc semantics).
    let (tx, rx) = futures::channel::mpsc::channel::<BlobToInstall>(0);
    let num_threads: usize = std::thread::available_parallelism().unwrap().into();
    let thread_pool = ThreadPoolBuilder::new().num_threads(num_threads).build().unwrap();
    // The CPU-bound work (read + hash + compress) runs off the async executor.
    let generate = fasync::unblock(move || {
        thread_pool.install(|| {
            blobs.par_iter().try_for_each(|(hash, path)| {
                let blob = BlobToInstall::new_from_file(
                    path.clone(),
                    fs_block_size,
                    compression_algorithm,
                )?;
                if &blob.hash != hash {
                    let calculated_hash = &blob.hash;
                    let path = path.display();
                    return Err(anyhow!(
                        "Hash mismatch for {path}: calculated={calculated_hash}, expected={hash}"
                    ));
                }
                futures::executor::block_on(tx.clone().send(blob))
                    .context("send blob to install task")
            })
        })?;
        Ok(())
    });
    // Install up to this many blobs concurrently on the executor side.
    const MAX_INSTALL_CONCURRENCY: usize = 10;
    let install = rx
        .map(|blob| install_blob_with_json_output(fxblob, blob))
        .buffer_unordered(MAX_INSTALL_CONCURRENCY)
        .try_collect::<BlobsJsonOutput>();
    let (installed_blobs, _) = try_join!(install, generate)?;
    assert_eq!(installed_blobs.len(), num_blobs);
    Ok(installed_blobs)
}
409
410async fn install_blob_with_json_output(
411 fxblob: &FxBlobBuilder,
412 blob: BlobToInstall,
413) -> Result<BlobsJsonOutputEntry, Error> {
414 let handle = fxblob.install_blob(&blob).await?;
415 let properties = handle.get_properties().await.context("get properties")?;
416 let source_path = blob
417 .source
418 .expect("missing source path")
419 .to_str()
420 .context("blob path to utf8")?
421 .to_string();
422 Ok(BlobsJsonOutputEntry {
423 source_path,
424 merkle: blob.hash.to_string(),
425 bytes: blob.uncompressed_size,
426 size: properties.allocated_size,
427 file_size: blob.uncompressed_size,
428 compressed_file_size: properties.data_attribute_size,
429 merkle_tree_size: blob.metadata.serialized_size().context("blob metadata size")?,
430 used_space_in_blobfs: properties.allocated_size,
431 })
432}
433
434fn maybe_compress(
435 buf: Vec<u8>,
436 filesystem_block_size: usize,
437 compression_algorithm: CompressionAlgorithm,
438) -> BlobData {
439 if buf.len() <= filesystem_block_size {
440 return BlobData::Uncompressed(buf); }
442 let chunked_archive_options = match compression_algorithm {
443 CompressionAlgorithm::Zstd => {
444 Type1Blob::CHUNKED_ARCHIVE_OPTIONS
446 }
447 CompressionAlgorithm::Lz4 => ChunkedArchiveOptions::V3 { compression_algorithm },
448 };
449 let archive =
450 ChunkedArchive::new(&buf, chunked_archive_options).expect("failed to compress data");
451 if archive.compressed_data_size().checked_next_multiple_of(filesystem_block_size).unwrap()
452 >= buf.len()
453 {
454 BlobData::Uncompressed(buf) } else {
456 match compression_algorithm {
457 CompressionAlgorithm::Zstd => BlobData::CompressedZstd(archive),
458 CompressionAlgorithm::Lz4 => BlobData::CompressedLz4(archive),
459 }
460 }
461}
462
/// Extracts every blob from the sparse fxfs `image` into `out_dir`, decompressing
/// chunked-compressed blobs back to their original bytes. Each blob is written to a
/// file named after its merkle root. Any existing `out_dir` is removed first.
pub async fn extract_blobs(image: PathBuf, out_dir: PathBuf) -> anyhow::Result<()> {
    if out_dir.exists() {
        fs::remove_dir_all(&out_dir).context("Failed to remove output directory")?;
    }
    fs::create_dir_all(&out_dir)?;

    // Unsparse the image into a temp file so it can back a block device.
    let mut source = fs::File::open(&image)?;
    let mut non_sparse_image = tempfile::NamedTempFile::new_in(&out_dir)?;
    unsparse(&mut source, non_sparse_image.as_file_mut())?;

    let device = DeviceHolder::new(FileBackedDevice::new(non_sparse_image.reopen()?, BLOCK_SIZE));
    let fs = FxFilesystemBuilder::new().read_only(true).open(device).await?;
    let vol =
        root_volume(fs.clone()).await?.volume(BLOB_VOLUME_NAME, StoreOptions::default()).await?;
    let root_dir = Directory::open(&vol, vol.root_directory_object_id()).await?;
    let layer_set = root_dir.store().tree().layer_set();
    let mut merger = layer_set.merger();
    let mut iter = root_dir.iter(&mut merger).await?;
    let blob_extraction_futures = futures::stream::FuturesUnordered::new();

    // Walk the blob directory; entries were named by merkle root at install time.
    while let Some((name, object_id, descriptor)) = iter.get() {
        if *descriptor == fxfs::object_store::ObjectDescriptor::File {
            let handle = fxfs::object_store::ObjectStore::open_object(
                root_dir.owner(),
                object_id,
                fxfs::object_store::HandleOptions::default(),
                None,
            )
            .await?;

            let out_path = out_dir.join(name);
            let mut file = std::fs::File::create(&out_path)?;
            // Read the blob's full on-disk payload into memory, READ_BUFFER_SIZE
            // blocks at a time.
            let mut read_buf = Vec::new();
            let mut offset = 0;
            let mut buf =
                handle.allocate_buffer((handle.block_size() * READ_BUFFER_SIZE) as usize).await;
            loop {
                let bytes = handle.read(offset, buf.as_mut()).await?;
                if bytes == 0 {
                    break;
                }
                offset += bytes as u64;
                read_buf.write_all(&buf.as_slice()[..bytes])?;
            }

            let metadata = BlobMetadata::read_from(&handle).await?;
            // Decompression is CPU-bound, so offload it via `fasync::unblock` and run
            // the per-blob tasks concurrently; they're awaited together below.
            blob_extraction_futures.push(fasync::unblock(move || -> Result<(), Error> {
                match metadata.format {
                    BlobFormat::ChunkedZstd {
                        uncompressed_size,
                        compressed_offsets,
                        chunk_size,
                    } => decompress_blob(
                        &read_buf,
                        uncompressed_size,
                        compressed_offsets,
                        chunk_size,
                        CompressionAlgorithm::Zstd,
                        &mut file,
                    ),
                    BlobFormat::ChunkedLz4 {
                        uncompressed_size,
                        compressed_offsets,
                        chunk_size,
                    } => decompress_blob(
                        &read_buf,
                        uncompressed_size,
                        compressed_offsets,
                        chunk_size,
                        CompressionAlgorithm::Lz4,
                        &mut file,
                    ),
                    BlobFormat::Uncompressed => {
                        file.write_all(&read_buf)?;
                        Ok(())
                    }
                }
            }));
        }
        iter.advance().await?;
    }
    blob_extraction_futures.try_collect::<()>().await?;
    Ok(())
}
551
552fn decompress_blob(
553 blob_data: &[u8],
554 uncompressed_size: u64,
555 compressed_offsets: Vec<u64>,
556 chunk_size: u64,
557 compression_algorithm: CompressionAlgorithm,
558 out: &mut std::fs::File,
559) -> Result<(), Error> {
560 let mut decompressor = compression_algorithm.decompressor();
561 let mut buf = vec![0; chunk_size as usize];
562 let mut total_decompressed_size = 0;
563 for i in 0..compressed_offsets.len() {
564 let start_offset = compressed_offsets[i] as usize;
565 let end_offset = if i + 1 == compressed_offsets.len() {
566 blob_data.len()
567 } else {
568 compressed_offsets[i + 1] as usize
569 };
570 let decompressed_size =
571 decompressor.decompress_into(&blob_data[start_offset..end_offset], &mut buf, i)?;
572 total_decompressed_size += decompressed_size;
573 out.write_all(&buf[..decompressed_size])?;
574 }
575 if total_decompressed_size != uncompressed_size as usize {
576 Err(anyhow!(
577 "Decompressed size does not match expected size {} {}",
578 total_decompressed_size,
579 uncompressed_size
580 ))
581 } else {
582 Ok(())
583 }
584}
585
586#[cfg(test)]
587mod tests {
588 use super::{BlobsJsonOutput, BlobsJsonOutputEntry, extract_blobs, make_blob_image};
589 use assert_matches::assert_matches;
590 use delivery_blob::compression::CompressionAlgorithm;
591 use fuchsia_async as fasync;
592 use fxfs::filesystem::FxFilesystem;
593 use fxfs::object_store::StoreOptions;
594 use fxfs::object_store::directory::Directory;
595 use fxfs::object_store::volume::root_volume;
596 use sparse::reader::SparseReader;
597 use std::fs::File;
598 use std::io::{Seek as _, SeekFrom, Write};
599 use std::path::Path;
600 use std::str::from_utf8;
601 use storage_device::DeviceHolder;
602 use storage_device::file_backed_device::FileBackedDevice;
603 use tempfile::TempDir;
604
    /// End-to-end: build an image containing one zstd-compressed blob, extract it from
    /// the sparse image, and verify the decompressed size matches the input.
    #[fasync::run(10, test)]
    async fn test_extract_blobs_zstd() {
        let tmp = TempDir::new().unwrap();
        let dir = tmp.path();

        let input_blob_path = dir.join("input.txt");
        let image_path = dir.join("fxfs1.blk");
        let sparse_path = dir.join("fxfs1.sparse.blk");
        let out_dir = dir.join("extracted_out");

        // Highly repetitive data so the blob takes the compressed path.
        let data = "C".repeat(128 * 1024);
        std::fs::write(&input_blob_path, &data).unwrap();

        let merkle_hash = fuchsia_merkle::root_from_slice(data.as_bytes());

        make_blob_image(
            image_path.to_str().unwrap(),
            Some(sparse_path.to_str().unwrap()),
            vec![(merkle_hash, input_blob_path.clone())],
            dir.join("blobs1.json").to_str().unwrap(),
            None,
            Some(CompressionAlgorithm::Zstd),
        )
        .await
        .expect("make_blob_image failed");

        extract_blobs(sparse_path, out_dir.clone())
            .await
            .expect("Extraction failed inside extract_blobs");

        let mut extracted_files = std::fs::read_dir(&out_dir).expect("out_dir should exist");
        let first_entry = extracted_files
            .next()
            .expect("No files were extracted!")
            .expect("Failed to read directory entry");

        let extracted_blob_path = first_entry.path();
        let final_len = std::fs::metadata(&extracted_blob_path).unwrap().len();

        assert_eq!(
            final_len,
            data.len() as u64,
            "Decompressed data size does not match original size",
        );
    }
650
    /// Same as `test_extract_blobs_zstd`, but exercising the Lz4 compression path.
    #[fasync::run(10, test)]
    async fn test_extract_blobs_lz4() {
        let tmp = TempDir::new().unwrap();
        let dir = tmp.path();

        let input_blob_path = dir.join("input.txt");
        let image_path = dir.join("fxfs1.blk");
        let sparse_path = dir.join("fxfs1.sparse.blk");
        let out_dir = dir.join("extracted_out");

        // Highly repetitive data so the blob takes the compressed path.
        let data = "C".repeat(128 * 1024);
        std::fs::write(&input_blob_path, &data).unwrap();

        let merkle_hash = fuchsia_merkle::root_from_slice(data.as_bytes());

        make_blob_image(
            image_path.to_str().unwrap(),
            Some(sparse_path.to_str().unwrap()),
            vec![(merkle_hash, input_blob_path.clone())],
            dir.join("blobs1.json").to_str().unwrap(),
            None,
            Some(CompressionAlgorithm::Lz4),
        )
        .await
        .expect("make_blob_image failed");

        extract_blobs(sparse_path, out_dir.clone())
            .await
            .expect("Extraction failed inside extract_blobs");

        let mut extracted_files = std::fs::read_dir(&out_dir).expect("out_dir should exist");
        let first_entry = extracted_files
            .next()
            .expect("No files were extracted!")
            .expect("Failed to read directory entry");

        let extracted_blob_path = first_entry.path();
        let final_len = std::fs::metadata(&extracted_blob_path).unwrap().len();

        assert_eq!(
            final_len,
            data.len() as u64,
            "Decompressed data size does not match original size",
        );
    }
696
    /// Builds an image from three small blobs, then verifies the JSON manifest entries
    /// and that both the raw and the unsparsed images contain exactly those blobs.
    #[fasync::run(10, test)]
    async fn test_make_blob_image() {
        let tmp = TempDir::new().unwrap();
        let dir = tmp.path();
        let blobs_in = {
            let write_data = |path, data: &str| {
                let mut file = File::create(&path).unwrap();
                write!(file, "{}", data).unwrap();
                let root = fuchsia_merkle::root_from_slice(data);
                (root, path)
            };
            vec![
                write_data(dir.join("stuff1.txt"), "Goodbye, stranger!"),
                write_data(dir.join("stuff2.txt"), "It's been nice!"),
                // Chosen large enough that merkle_tree_size is non-zero — see the
                // assertion on blobs[2] below.
                write_data(dir.join("stuff3.txt"), from_utf8(&['a' as u8; 65_537]).unwrap()),
            ]
        };

        let dir = tmp.path();
        let output_path = dir.join("fxfs.blk");
        let sparse_path = dir.join("fxfs.sparse.blk");
        let blobs_json_path = dir.join("blobs.json");
        make_blob_image(
            output_path.as_os_str().to_str().unwrap(),
            Some(sparse_path.as_os_str().to_str().unwrap()),
            blobs_in,
            blobs_json_path.as_os_str().to_str().unwrap(),
            None,
            Some(CompressionAlgorithm::Zstd),
        )
        .await
        .expect("make_blob_image failed");

        // Check each manifest entry against the known inputs.
        let mut blobs_json = std::fs::OpenOptions::new()
            .read(true)
            .open(blobs_json_path)
            .expect("Failed to open blob manifest");
        let mut blobs: BlobsJsonOutput =
            serde_json::from_reader(&mut blobs_json).expect("Failed to serialize to JSON output");

        assert_eq!(blobs.len(), 3);
        blobs.sort_by_key(|entry| entry.source_path.clone());

        assert_eq!(Path::new(blobs[0].source_path.as_str()), dir.join("stuff1.txt"));
        assert_matches!(
            &blobs[0],
            BlobsJsonOutputEntry {
                merkle,
                bytes: 18,
                size: 4096,
                file_size: 18,
                merkle_tree_size: 0,
                used_space_in_blobfs: 4096,
                ..
            } if merkle == "9a24fe2fb8da617f39d303750bbe23f4e03a8b5f4d52bc90b2e5e9e44daddb3a"
        );
        assert_eq!(Path::new(blobs[1].source_path.as_str()), dir.join("stuff2.txt"));
        assert_matches!(
            &blobs[1],
            BlobsJsonOutputEntry {
                merkle,
                bytes: 15,
                size: 4096,
                file_size: 15,
                merkle_tree_size: 0,
                used_space_in_blobfs: 4096,
                ..
            } if merkle == "deebe5d5a0a42a51a293b511d0368e6f2b4da522ee0f05c6ae728c77d904f916"
        );
        assert_eq!(Path::new(blobs[2].source_path.as_str()), dir.join("stuff3.txt"));
        assert_matches!(
            &blobs[2],
            BlobsJsonOutputEntry {
                merkle,
                bytes: 65537,
                size: 8192,
                file_size: 65537,
                merkle_tree_size: 308,
                used_space_in_blobfs: 8192,
                ..
            } if merkle == "1194c76d2d3b61f29df97a85ede7b2fd2b293b452f53072356e3c5c939c8131d"
        );

        // Unsparse the sparse image; it should match the raw image in size and content.
        let unsparsed_image = {
            let sparse_image = std::fs::OpenOptions::new().read(true).open(sparse_path).unwrap();
            let mut reader = SparseReader::new(sparse_image).expect("Failed to parse sparse image");

            let unsparsed_image_path = dir.join("fxfs.unsparsed.blk");
            let mut unsparsed_image = std::fs::OpenOptions::new()
                .read(true)
                .write(true)
                .create(true)
                .open(unsparsed_image_path)
                .unwrap();

            std::io::copy(&mut reader, &mut unsparsed_image).expect("Failed to unsparse");
            unsparsed_image.seek(SeekFrom::Start(0)).unwrap();
            unsparsed_image
        };

        let orig_image = std::fs::OpenOptions::new()
            .read(true)
            .open(output_path.clone())
            .expect("Failed to open image");

        assert_eq!(unsparsed_image.metadata().unwrap().len(), orig_image.metadata().unwrap().len());

        // Both images should mount and list exactly the three blobs, named by merkle root.
        for image in [orig_image, unsparsed_image] {
            let device = DeviceHolder::new(FileBackedDevice::new(image, 4096));
            let filesystem = FxFilesystem::open(device).await.unwrap();
            let root_volume = root_volume(filesystem.clone()).await.expect("Opening root volume");
            let vol =
                root_volume.volume("blob", StoreOptions::default()).await.expect("Opening volume");
            let directory = Directory::open(&vol, vol.root_directory_object_id())
                .await
                .expect("Opening root dir");
            let entries = {
                let layer_set = directory.store().tree().layer_set();
                let mut merger = layer_set.merger();
                let mut iter = directory.iter(&mut merger).await.expect("iter failed");
                let mut entries = vec![];
                while let Some((name, _, _)) = iter.get() {
                    entries.push(name.to_string());
                    iter.advance().await.expect("advance failed");
                }
                entries
            };
            assert_eq!(
                &entries[..],
                &[
                    "1194c76d2d3b61f29df97a85ede7b2fd2b293b452f53072356e3c5c939c8131d",
                    "9a24fe2fb8da617f39d303750bbe23f4e03a8b5f4d52bc90b2e5e9e44daddb3a",
                    "deebe5d5a0a42a51a293b511d0368e6f2b4da522ee0f05c6ae728c77d904f916",
                ]
            );
        }
    }
838
    /// Builds the same 32 MiB highly-compressible blob into two images — one with zstd
    /// compression and one without — and checks the compressed image file is smaller.
    #[fasync::run(10, test)]
    async fn test_make_uncompressed_blob_image() {
        let tmp = TempDir::new().unwrap();
        let dir = tmp.path();
        let path = dir.join("large_blob.txt");
        let mut file = File::create(&path).unwrap();
        let data = vec![0xabu8; 32 * 1024 * 1024];
        file.write_all(&data).unwrap();
        let root = fuchsia_merkle::root_from_slice(&data);
        let blobs_in = vec![(root, path)];

        let compressed_path = dir.join("fxfs-compressed.blk");
        let blobs_json_path = dir.join("blobs.json");
        make_blob_image(
            compressed_path.as_os_str().to_str().unwrap(),
            None,
            blobs_in.clone(),
            blobs_json_path.as_os_str().to_str().unwrap(),
            None,
            Some(CompressionAlgorithm::Zstd),
        )
        .await
        .expect("make_blob_image failed");

        let uncompressed_path = dir.join("fxfs-uncompressed.blk");
        make_blob_image(
            uncompressed_path.as_os_str().to_str().unwrap(),
            None,
            blobs_in,
            blobs_json_path.as_os_str().to_str().unwrap(),
            None,
            None,
        )
        .await
        .expect("make_blob_image failed");

        assert!(
            std::fs::metadata(compressed_path).unwrap().len()
                < std::fs::metadata(uncompressed_path).unwrap().len()
        )
    }
880
881 #[fasync::run(10, test)]
882 async fn test_make_blob_image_with_target_size() {
883 const TARGET_SIZE: u64 = 200 * 1024 * 1024;
884 let tmp = TempDir::new().unwrap();
885 let dir = tmp.path();
886 let path = dir.join("large_blob.txt");
887 let mut file = File::create(&path).unwrap();
888 let data = vec![0xabu8; 8 * 1024 * 1024];
889 file.write_all(&data).unwrap();
890 let root = fuchsia_merkle::root_from_slice(&data);
891 let blobs_in = vec![(root, path)];
892
893 let image_path = dir.join("fxfs.blk");
894 let sparse_image_path = dir.join("fxfs.sparse.blk");
895 let blobs_json_path = dir.join("blobs.json");
896 make_blob_image(
897 image_path.as_os_str().to_str().unwrap(),
898 Some(sparse_image_path.as_os_str().to_str().unwrap()),
899 blobs_in.clone(),
900 blobs_json_path.as_os_str().to_str().unwrap(),
901 Some(200 * 1024 * 1024),
902 Some(CompressionAlgorithm::Zstd),
903 )
904 .await
905 .expect("make_blob_image failed");
906
907 let image_size = std::fs::metadata(image_path).unwrap().len();
910 let sparse_image_size = std::fs::metadata(sparse_image_path).unwrap().len();
911 assert_eq!(image_size, TARGET_SIZE);
912 assert!(sparse_image_size < TARGET_SIZE, "Sparse image size: {sparse_image_size}");
913 }
914}