1use anyhow::{Context, Error, anyhow};
6use delivery_blob::Type1Blob;
7pub use delivery_blob::compression::CompressionAlgorithm;
8use delivery_blob::compression::{ChunkedArchive, ChunkedArchiveOptions};
9use fuchsia_async as fasync;
10use fuchsia_merkle::{Hash, MerkleRootBuilder};
11use futures::{SinkExt as _, StreamExt as _, TryStreamExt as _, try_join};
12use fxfs::blob_metadata::{BlobFormat, BlobMetadata, BlobMetadataLeafHashCollector};
13use fxfs::errors::FxfsError;
14use fxfs::filesystem::{FxFilesystemBuilder, OpenFxFilesystem};
15use fxfs::object_handle::{ObjectHandle, ReadObjectHandle, WriteBytes};
16use fxfs::object_store::directory::Directory;
17use fxfs::object_store::journal::RESERVED_SPACE;
18use fxfs::object_store::journal::super_block::SuperBlockInstance;
19use fxfs::object_store::transaction::{LockKey, lock_keys};
20use fxfs::object_store::volume::root_volume;
21use fxfs::object_store::{
22 DataObjectHandle, DirectWriter, HandleOptions, NewChildStoreOptions, ObjectStore, StoreOptions,
23};
24use rayon::ThreadPoolBuilder;
25use rayon::prelude::*;
26use serde::{Deserialize, Serialize};
27use sparse::unsparse;
28use std::fs;
29use std::io::{BufWriter, Read, Write};
30use std::path::PathBuf;
31use storage_device::DeviceHolder;
32use storage_device::file_backed_device::FileBackedDevice;
33
/// Name of the volume that holds blobs inside the generated fxfs image.
pub const BLOB_VOLUME_NAME: &str = "blob";

// Block size used for both the file-backed device and the fxfs filesystem.
const BLOCK_SIZE: u32 = 4096;

// Number of filesystem blocks read per iteration when extracting blob contents.
const READ_BUFFER_SIZE: u64 = 512;
39
/// One entry of the `blobs.json` manifest written by [`make_blob_image`].
///
/// Several fields are redundant by construction (see `install_blob_with_json_output`):
/// `file_size` mirrors `bytes`, and `used_space_in_blobfs` mirrors `size`.
#[derive(Clone, Debug, Deserialize, PartialEq, Serialize)]
struct BlobsJsonOutputEntry {
    // Path of the file the blob's contents were read from.
    source_path: String,
    // Merkle root of the blob contents, hex-encoded.
    merkle: String,
    // Uncompressed size of the contents in bytes.
    bytes: usize,
    // Allocated (on-disk) size of the blob in bytes.
    size: u64,
    // Same value as `bytes`; kept for manifest compatibility.
    file_size: usize,
    // Size of the data attribute on disk (compressed size when compression was used).
    compressed_file_size: u64,
    // Serialized size of the blob's metadata (merkle leaves + format info).
    merkle_tree_size: usize,
    // Same value as `size`; kept for manifest compatibility.
    used_space_in_blobfs: u64,
}

type BlobsJsonOutput = Vec<BlobsJsonOutputEntry>;
54
/// Generates an fxfs image at `output_image_path` containing `blobs`, and writes a JSON manifest
/// describing the installed blobs to `json_output_path`.
///
/// If `target_size` is `Some` (non-zero), the image file is sized to exactly that many bytes; the
/// value must be block-aligned and at least one block, and installation fails with a descriptive
/// error if the blobs don't fit. Otherwise the image is built on a 4 GiB device and the file is
/// resized to twice the used space (plus journal reservation) afterwards.
///
/// If `sparse_output_image_path` is `Some`, an Android sparse image covering the used portion of
/// the image (padded with a hole up to the final size) is written there as well.
///
/// Blobs are compressed with `compression_algorithm` where compression saves space; `None`
/// disables compression entirely.
pub async fn make_blob_image(
    output_image_path: &str,
    sparse_output_image_path: Option<&str>,
    blobs: Vec<(Hash, PathBuf)>,
    json_output_path: &str,
    target_size: Option<u64>,
    compression_algorithm: Option<CompressionAlgorithm>,
) -> Result<(), Error> {
    let output_image = std::fs::OpenOptions::new()
        .read(true)
        .write(true)
        .create(true)
        .truncate(true)
        .open(output_image_path)?;

    // Zero means "no explicit size requested".
    let mut target_size = target_size.unwrap_or_default();

    // Validate the requested size before doing any work.
    if target_size > 0 && target_size < BLOCK_SIZE as u64 {
        return Err(anyhow!("Size {} is too small", target_size));
    }
    if target_size % BLOCK_SIZE as u64 > 0 {
        return Err(anyhow!("Invalid size {} is not block-aligned", target_size));
    }
    let block_count = if target_size != 0 {
        output_image.set_len(target_size).context("Failed to resize image")?;
        target_size / BLOCK_SIZE as u64
    } else {
        // No explicit size: build on a 4 GiB device; the file is resized down below.
        const FOUR_GIGS: u64 = 4 * 1024 * 1024 * 1024;
        FOUR_GIGS / BLOCK_SIZE as u64
    };

    let device = DeviceHolder::new(FileBackedDevice::new_with_block_count(
        output_image,
        BLOCK_SIZE,
        block_count,
    ));
    let fxblob = FxBlobBuilder::new(device).await?;
    let blobs_json = install_blobs(&fxblob, blobs, compression_algorithm).await.map_err(|e| {
        // Produce a more actionable error when a fixed-size image simply ran out of room.
        if target_size != 0 && FxfsError::NoSpace.matches(&e) {
            e.context(format!(
                "Configured image size {} is too small to fit the base system image.",
                target_size
            ))
        } else {
            e
        }
    })?;
    let actual_size = fxblob.finalize().await?.1;

    if target_size == 0 {
        // Leave headroom: twice the used space, accounting for the journal's reservation.
        target_size = (actual_size + RESERVED_SPACE) * 2;
    }

    if let Some(sparse_path) = sparse_output_image_path {
        create_sparse_image(sparse_path, output_image_path, actual_size, target_size, BLOCK_SIZE)
            .context("Failed to create sparse image")?;
    }

    if target_size != actual_size {
        debug_assert!(target_size > actual_size);
        let output_image =
            std::fs::OpenOptions::new().read(true).write(true).open(output_image_path)?;
        output_image.set_len(target_size).context("Failed to resize image")?;
    }

    let mut json_output = BufWriter::new(
        std::fs::File::create(json_output_path).context("Failed to create JSON output file")?,
    );
    serde_json::to_writer_pretty(&mut json_output, &blobs_json)
        .context("Failed to serialize to JSON output")?;

    Ok(())
}
145
146fn create_sparse_image(
147 sparse_output_image_path: &str,
148 image_path: &str,
149 actual_size: u64,
150 target_size: u64,
151 block_size: u32,
152) -> Result<(), Error> {
153 let image = std::fs::OpenOptions::new()
154 .read(true)
155 .open(image_path)
156 .with_context(|| format!("Failed to open {:?}", image_path))?;
157 let mut output = std::fs::OpenOptions::new()
158 .read(true)
159 .write(true)
160 .create(true)
161 .truncate(true)
162 .open(sparse_output_image_path)
163 .with_context(|| format!("Failed to create {:?}", sparse_output_image_path))?;
164 sparse::builder::SparseImageBuilder::new()
165 .set_block_size(block_size)
166 .add_source(sparse::builder::DataSource::Reader {
167 reader: Box::new(image),
168 size: actual_size,
169 })
170 .add_source(sparse::builder::DataSource::Skip(target_size - actual_size))
171 .build(&mut output)
172 .map_err(anyhow::Error::from)
173}
174
/// Helper for building an fxfs image pre-populated with blobs. Opened in image-builder mode by
/// [`FxBlobBuilder::new`] and flushed to the device by [`FxBlobBuilder::finalize`].
pub struct FxBlobBuilder {
    // Root directory of the blob volume; installed blobs become children of this directory.
    blob_directory: Directory<ObjectStore>,
    filesystem: OpenFxFilesystem,
}
180
impl FxBlobBuilder {
    /// Formats `device` with a fresh fxfs filesystem (in image-builder mode, using superblock A),
    /// creates the blob volume, and opens its root directory.
    pub async fn new(device: DeviceHolder) -> Result<Self, Error> {
        let filesystem = FxFilesystemBuilder::new()
            .format(true)
            .trim_config(None)
            .image_builder_mode(Some(SuperBlockInstance::A))
            .open(device)
            .await
            .context("Failed to format filesystem")?;
        filesystem.enable_allocations();
        let root_volume = root_volume(filesystem.clone()).await?;
        let vol = root_volume
            .new_volume(BLOB_VOLUME_NAME, NewChildStoreOptions::default())
            .await
            .context("Failed to create volume")?;
        let blob_directory = Directory::open(&vol, vol.root_directory_object_id())
            .await
            .context("Unable to open root blob directory")?;
        Ok(Self { blob_directory, filesystem })
    }

    /// Closes the filesystem and returns the underlying device together with the maximum offset
    /// the allocator ever used (i.e. how much of the device the image actually occupies).
    pub async fn finalize(self) -> Result<(DeviceHolder, u64), Error> {
        self.filesystem.close().await?;
        let actual_size = self.filesystem.allocator().maximum_offset();
        Ok((self.filesystem.take_device().await, actual_size))
    }

    /// Installs `blob` into the blob directory: creates a child file named after the blob's
    /// merkle root, streams the (possibly compressed) contents into it, then writes the blob's
    /// metadata. Returns the handle to the new object.
    pub async fn install_blob(
        &self,
        blob: &BlobToInstall,
    ) -> Result<DataObjectHandle<ObjectStore>, Error> {
        let handle;
        // Lock the blob directory while creating the child so concurrent installs serialize on
        // directory mutation.
        let keys = lock_keys![LockKey::object(
            self.blob_directory.store().store_object_id(),
            self.blob_directory.object_id(),
        )];
        let mut transaction = self
            .filesystem
            .clone()
            .new_transaction(keys, Default::default())
            .await
            .context("new transaction")?;
        handle = self
            .blob_directory
            .create_child_file_with_options(
                &mut transaction,
                &blob.hash.to_string(),
                // Blobs are content-verified via their merkle tree, so block checksums are
                // skipped.
                HandleOptions { skip_checksums: true, ..Default::default() },
            )
            .await
            .context("create child file")?;
        transaction.commit().await.context("transaction commit")?;

        {
            let mut writer = DirectWriter::new(&handle, Default::default()).await;
            match &blob.data {
                BlobData::Uncompressed(data) => {
                    writer.write_bytes(data).await.context("write blob contents")?;
                }
                BlobData::CompressedZstd(archive) | BlobData::CompressedLz4(archive) => {
                    // Compressed chunks are written back-to-back, matching the offsets recorded
                    // in the blob's metadata.
                    for chunk in archive.chunks() {
                        writer
                            .write_bytes(&chunk.compressed_data)
                            .await
                            .context("write blob contents")?;
                    }
                }
            }
            writer.complete().await.context("flush blob contents")?;
        }

        blob.metadata.write_to(&handle).await.context("write blob metadata")?;

        Ok(handle)
    }

    /// Convenience wrapper around [`BlobToInstall::new`] using this filesystem's block size.
    pub fn generate_blob(
        &self,
        data: Vec<u8>,
        compression_algorithm: Option<CompressionAlgorithm>,
    ) -> Result<BlobToInstall, Error> {
        BlobToInstall::new(data, self.filesystem.block_size() as usize, compression_algorithm)
    }
}
274
/// The on-disk form chosen for a blob's contents.
enum BlobData {
    /// Raw bytes, stored as-is.
    Uncompressed(Vec<u8>),
    /// Zstd-compressed chunked archive.
    CompressedZstd(ChunkedArchive),
    /// Lz4-compressed chunked archive.
    CompressedLz4(ChunkedArchive),
}
280
281fn compressed_offsets(chunked_archive: &ChunkedArchive) -> Vec<u64> {
282 let mut offsets = Vec::with_capacity(chunked_archive.chunks().len());
283 let mut offset: u64 = 0;
284 for chunk in chunked_archive.chunks() {
285 offsets.push(offset);
286 offset += chunk.compressed_data.len() as u64;
287 }
288 offsets
289}
290
/// A blob that has been hashed and (optionally) compressed, ready to be installed into the image
/// via `FxBlobBuilder::install_blob`.
pub struct BlobToInstall {
    // Merkle root of the uncompressed contents.
    hash: Hash,
    // Contents in the form they will be written to disk.
    data: BlobData,
    // Length of the original, uncompressed contents in bytes.
    uncompressed_size: usize,
    // Metadata (merkle leaves + format description) written alongside the contents.
    metadata: BlobMetadata,
    // Path the contents were read from; set only by `new_from_file`.
    source: Option<PathBuf>,
}
305
impl BlobToInstall {
    /// Creates a blob from in-memory `data`: computes the merkle root and leaf hashes, optionally
    /// compresses the contents, and builds the matching [`BlobMetadata`]. `source` is left unset.
    pub fn new(
        data: Vec<u8>,
        fs_block_size: usize,
        compression_algorithm: Option<CompressionAlgorithm>,
    ) -> Result<Self, Error> {
        let (hash, hashes) =
            MerkleRootBuilder::new(BlobMetadataLeafHashCollector::new()).complete(&data);

        let uncompressed_size = data.len();
        // Compression is best-effort: `maybe_compress` keeps the data uncompressed when
        // compression wouldn't save space on disk.
        let data = if let Some(compression_algorithm) = compression_algorithm {
            maybe_compress(data, fs_block_size, compression_algorithm)
        } else {
            BlobData::Uncompressed(data)
        };
        // The metadata format must mirror the chosen data representation exactly; readers use it
        // to locate and decompress the chunks.
        let metadata = match &data {
            BlobData::Uncompressed(_) => {
                BlobMetadata { merkle_leaves: hashes, format: BlobFormat::Uncompressed }
            }
            BlobData::CompressedZstd(chunked_archive) => BlobMetadata {
                merkle_leaves: hashes,
                format: BlobFormat::ChunkedZstd {
                    uncompressed_size: uncompressed_size as u64,
                    chunk_size: chunked_archive.chunk_size() as u64,
                    compressed_offsets: compressed_offsets(&chunked_archive),
                },
            },
            BlobData::CompressedLz4(chunked_archive) => BlobMetadata {
                merkle_leaves: hashes,
                format: BlobFormat::ChunkedLz4 {
                    uncompressed_size: uncompressed_size as u64,
                    chunk_size: chunked_archive.chunk_size() as u64,
                    compressed_offsets: compressed_offsets(&chunked_archive),
                },
            },
        };
        Ok(BlobToInstall { hash, data, uncompressed_size, metadata, source: None })
    }

    /// Like [`BlobToInstall::new`], but reads the contents from `path` and records `path` as the
    /// blob's source.
    pub fn new_from_file(
        path: PathBuf,
        fs_block_size: usize,
        compression_algorithm: Option<CompressionAlgorithm>,
    ) -> Result<Self, Error> {
        let mut data = Vec::new();
        std::fs::File::open(&path)
            .with_context(|| format!("Unable to open `{:?}'", &path))?
            .read_to_end(&mut data)
            .with_context(|| format!("Unable to read contents of `{:?}'", &path))?;
        let blob = Self::new(data, fs_block_size, compression_algorithm)?;
        Ok(Self { source: Some(path), ..blob })
    }

    /// Returns the merkle root of the blob's (uncompressed) contents.
    pub fn hash(&self) -> Hash {
        self.hash.clone()
    }
}
366
/// Generates and installs `blobs` into `fxblob`, returning one manifest entry per blob.
///
/// Blob generation (read + hash + compress) is CPU-bound and runs on a rayon thread pool; the
/// results are fed through a bounded channel to an async stage that installs them concurrently.
/// Fails if any blob's computed merkle root differs from the hash the caller supplied.
async fn install_blobs(
    fxblob: &FxBlobBuilder,
    blobs: Vec<(Hash, PathBuf)>,
    compression_algorithm: Option<CompressionAlgorithm>,
) -> Result<BlobsJsonOutput, Error> {
    let num_blobs = blobs.len();
    let fs_block_size = fxblob.filesystem.block_size() as usize;
    // Zero-capacity channel: each rayon worker blocks until the install stage takes its blob.
    let (tx, rx) = futures::channel::mpsc::channel::<BlobToInstall>(0);
    let num_threads: usize = std::thread::available_parallelism().unwrap().into();
    let thread_pool = ThreadPoolBuilder::new().num_threads(num_threads).build().unwrap();
    let generate = fasync::unblock(move || {
        thread_pool.install(|| {
            blobs.par_iter().try_for_each(|(hash, path)| {
                let blob = BlobToInstall::new_from_file(
                    path.clone(),
                    fs_block_size,
                    compression_algorithm,
                )?;
                // Verify the file's contents actually match the hash the caller expects.
                if &blob.hash != hash {
                    let calculated_hash = &blob.hash;
                    let path = path.display();
                    return Err(anyhow!(
                        "Hash mismatch for {path}: calculated={calculated_hash}, expected={hash}"
                    ));
                }
                futures::executor::block_on(tx.clone().send(blob))
                    .context("send blob to install task")
            })
        })?;
        Ok(())
    });
    // Cap how many blobs are installed concurrently.
    const MAX_INSTALL_CONCURRENCY: usize = 10;
    let install = rx
        .map(|blob| install_blob_with_json_output(fxblob, blob))
        .buffer_unordered(MAX_INSTALL_CONCURRENCY)
        .try_collect::<BlobsJsonOutput>();
    let (installed_blobs, _) = try_join!(install, generate)?;
    assert_eq!(installed_blobs.len(), num_blobs);
    Ok(installed_blobs)
}
410
/// Installs `blob` into `fxblob` and builds its manifest entry from the installed object's
/// properties.
///
/// Panics if `blob` has no source path (i.e. it was not created via
/// `BlobToInstall::new_from_file`).
async fn install_blob_with_json_output(
    fxblob: &FxBlobBuilder,
    blob: BlobToInstall,
) -> Result<BlobsJsonOutputEntry, Error> {
    let handle = fxblob.install_blob(&blob).await?;
    let properties = handle.get_properties().await.context("get properties")?;
    let source_path = blob
        .source
        .expect("missing source path")
        .to_str()
        .context("blob path to utf8")?
        .to_string();
    Ok(BlobsJsonOutputEntry {
        source_path,
        merkle: blob.hash.to_string(),
        bytes: blob.uncompressed_size,
        size: properties.allocated_size,
        // `file_size` duplicates `bytes` and `used_space_in_blobfs` duplicates `size`, matching
        // the manifest schema.
        file_size: blob.uncompressed_size,
        compressed_file_size: properties.data_attribute_size,
        merkle_tree_size: blob.metadata.serialized_size().context("blob metadata size")?,
        used_space_in_blobfs: properties.allocated_size,
    })
}
434
435fn maybe_compress(
436 buf: Vec<u8>,
437 filesystem_block_size: usize,
438 compression_algorithm: CompressionAlgorithm,
439) -> BlobData {
440 if buf.len() <= filesystem_block_size {
441 return BlobData::Uncompressed(buf); }
443 let chunked_archive_options = match compression_algorithm {
444 CompressionAlgorithm::Zstd => {
445 Type1Blob::CHUNKED_ARCHIVE_OPTIONS
447 }
448 CompressionAlgorithm::Lz4 => ChunkedArchiveOptions::V3 { compression_algorithm },
449 };
450 let archive =
451 ChunkedArchive::new(&buf, chunked_archive_options).expect("failed to compress data");
452 if archive.compressed_data_size().checked_next_multiple_of(filesystem_block_size).unwrap()
453 >= buf.len()
454 {
455 BlobData::Uncompressed(buf) } else {
457 match compression_algorithm {
458 CompressionAlgorithm::Zstd => BlobData::CompressedZstd(archive),
459 CompressionAlgorithm::Lz4 => BlobData::CompressedLz4(archive),
460 }
461 }
462}
463
/// Extracts every blob from the sparse fxfs `image` into `out_dir`, writing one file per blob
/// (named by its merkle root) with compressed blobs decompressed back to their original contents.
///
/// Any existing `out_dir` is removed first.
pub async fn extract_blobs(image: PathBuf, out_dir: PathBuf) -> anyhow::Result<()> {
    if out_dir.exists() {
        fs::remove_dir_all(&out_dir).context("Failed to remove output directory")?;
    }
    fs::create_dir_all(&out_dir)?;

    // Unsparse the image into a temp file that backs the block device for fxfs.
    let mut source = fs::File::open(&image)?;
    let mut non_sparse_image = tempfile::NamedTempFile::new_in(&out_dir)?;
    unsparse(&mut source, non_sparse_image.as_file_mut()).map_err(anyhow::Error::from)?;

    let device = DeviceHolder::new(FileBackedDevice::new(non_sparse_image.reopen()?, BLOCK_SIZE));
    let fs = FxFilesystemBuilder::new().read_only(true).open(device).await?;
    let vol =
        root_volume(fs.clone()).await?.volume(BLOB_VOLUME_NAME, StoreOptions::default()).await?;
    let root_dir = Directory::open(&vol, vol.root_directory_object_id()).await?;
    let layer_set = root_dir.store().tree().layer_set();
    let mut merger = layer_set.merger();
    let mut iter = root_dir.iter(&mut merger).await?;
    let blob_extraction_futures = futures::stream::FuturesUnordered::new();

    while let Some((name, object_id, descriptor)) = iter.get() {
        if *descriptor == fxfs::object_store::ObjectDescriptor::File {
            let handle = fxfs::object_store::ObjectStore::open_object(
                root_dir.owner(),
                object_id,
                fxfs::object_store::HandleOptions::default(),
                None,
            )
            .await?;

            let out_path = out_dir.join(name);
            let mut file = std::fs::File::create(&out_path)?;
            // Slurp the blob's raw (possibly compressed) contents into memory, reading
            // READ_BUFFER_SIZE blocks at a time.
            let mut read_buf = Vec::new();
            let mut offset = 0;
            let mut buf =
                handle.allocate_buffer((handle.block_size() * READ_BUFFER_SIZE) as usize).await;
            loop {
                let bytes = handle.read(offset, buf.as_mut()).await?;
                if bytes == 0 {
                    break;
                }
                offset += bytes as u64;
                read_buf.write_all(&buf.as_slice()[..bytes])?;
            }

            // The blob's metadata records whether (and how) the contents are compressed.
            let metadata = BlobMetadata::read_from(&handle).await?;
            // Decompression is CPU-bound, so run it off the async executor; writes to distinct
            // output files can proceed in parallel.
            blob_extraction_futures.push(fasync::unblock(move || -> Result<(), Error> {
                match metadata.format {
                    BlobFormat::ChunkedZstd {
                        uncompressed_size,
                        compressed_offsets,
                        chunk_size,
                    } => decompress_blob(
                        &read_buf,
                        uncompressed_size,
                        compressed_offsets,
                        chunk_size,
                        CompressionAlgorithm::Zstd,
                        &mut file,
                    ),
                    BlobFormat::ChunkedLz4 {
                        uncompressed_size,
                        compressed_offsets,
                        chunk_size,
                    } => decompress_blob(
                        &read_buf,
                        uncompressed_size,
                        compressed_offsets,
                        chunk_size,
                        CompressionAlgorithm::Lz4,
                        &mut file,
                    ),
                    BlobFormat::Uncompressed => {
                        file.write_all(&read_buf)?;
                        Ok(())
                    }
                }
            }));
        }
        iter.advance().await?;
    }
    blob_extraction_futures.try_collect::<()>().await?;
    Ok(())
}
552
553fn decompress_blob(
554 blob_data: &[u8],
555 uncompressed_size: u64,
556 compressed_offsets: Vec<u64>,
557 chunk_size: u64,
558 compression_algorithm: CompressionAlgorithm,
559 out: &mut std::fs::File,
560) -> Result<(), Error> {
561 let mut decompressor = compression_algorithm.decompressor();
562 let mut buf = vec![0; chunk_size as usize];
563 let mut total_decompressed_size = 0;
564 for i in 0..compressed_offsets.len() {
565 let start_offset = compressed_offsets[i] as usize;
566 let end_offset = if i + 1 == compressed_offsets.len() {
567 blob_data.len()
568 } else {
569 compressed_offsets[i + 1] as usize
570 };
571 let decompressed_size =
572 decompressor.decompress_into(&blob_data[start_offset..end_offset], &mut buf, i)?;
573 total_decompressed_size += decompressed_size;
574 out.write_all(&buf[..decompressed_size])?;
575 }
576 if total_decompressed_size != uncompressed_size as usize {
577 Err(anyhow!(
578 "Decompressed size does not match expected size {} {}",
579 total_decompressed_size,
580 uncompressed_size
581 ))
582 } else {
583 Ok(())
584 }
585}
586
587#[cfg(test)]
588mod tests {
589 use super::{BlobsJsonOutput, BlobsJsonOutputEntry, extract_blobs, make_blob_image};
590 use assert_matches::assert_matches;
591 use delivery_blob::compression::CompressionAlgorithm;
592 use fuchsia_async as fasync;
593 use fxfs::filesystem::FxFilesystem;
594 use fxfs::object_store::StoreOptions;
595 use fxfs::object_store::directory::Directory;
596 use fxfs::object_store::volume::root_volume;
597 use sparse::reader::SparseReader;
598 use std::fs::File;
599 use std::io::{Seek as _, SeekFrom, Write};
600 use std::path::Path;
601 use std::str::from_utf8;
602 use storage_device::DeviceHolder;
603 use storage_device::file_backed_device::FileBackedDevice;
604 use tempfile::TempDir;
605
    // Round-trip: build an image containing one zstd-compressed blob, extract it, and verify the
    // decompressed output has the original length.
    #[fasync::run(10, test)]
    async fn test_extract_blobs_zstd() {
        let tmp = TempDir::new().unwrap();
        let dir = tmp.path();

        let input_blob_path = dir.join("input.txt");
        let image_path = dir.join("fxfs1.blk");
        let sparse_path = dir.join("fxfs1.sparse.blk");
        let out_dir = dir.join("extracted_out");

        // 128 KiB of a single repeated byte: larger than one block and highly compressible.
        let data = "C".repeat(128 * 1024);
        std::fs::write(&input_blob_path, &data).unwrap();

        let merkle_hash = fuchsia_merkle::root_from_slice(data.as_bytes());

        make_blob_image(
            image_path.to_str().unwrap(),
            Some(sparse_path.to_str().unwrap()),
            vec![(merkle_hash, input_blob_path.clone())],
            dir.join("blobs1.json").to_str().unwrap(),
            None,
            Some(CompressionAlgorithm::Zstd),
        )
        .await
        .expect("make_blob_image failed");

        // Extraction consumes the sparse image and should decompress the blob.
        extract_blobs(sparse_path, out_dir.clone())
            .await
            .expect("Extraction failed inside extract_blobs");

        let mut extracted_files = std::fs::read_dir(&out_dir).expect("out_dir should exist");
        let first_entry = extracted_files
            .next()
            .expect("No files were extracted!")
            .expect("Failed to read directory entry");

        let extracted_blob_path = first_entry.path();
        let final_len = std::fs::metadata(&extracted_blob_path).unwrap().len();

        assert_eq!(
            final_len,
            data.len() as u64,
            "Decompressed data size does not match original size",
        );
    }
651
    // Same round-trip as test_extract_blobs_zstd, but exercising the Lz4 code path.
    #[fasync::run(10, test)]
    async fn test_extract_blobs_lz4() {
        let tmp = TempDir::new().unwrap();
        let dir = tmp.path();

        let input_blob_path = dir.join("input.txt");
        let image_path = dir.join("fxfs1.blk");
        let sparse_path = dir.join("fxfs1.sparse.blk");
        let out_dir = dir.join("extracted_out");

        // 128 KiB of a single repeated byte: larger than one block and highly compressible.
        let data = "C".repeat(128 * 1024);
        std::fs::write(&input_blob_path, &data).unwrap();

        let merkle_hash = fuchsia_merkle::root_from_slice(data.as_bytes());

        make_blob_image(
            image_path.to_str().unwrap(),
            Some(sparse_path.to_str().unwrap()),
            vec![(merkle_hash, input_blob_path.clone())],
            dir.join("blobs1.json").to_str().unwrap(),
            None,
            Some(CompressionAlgorithm::Lz4),
        )
        .await
        .expect("make_blob_image failed");

        extract_blobs(sparse_path, out_dir.clone())
            .await
            .expect("Extraction failed inside extract_blobs");

        let mut extracted_files = std::fs::read_dir(&out_dir).expect("out_dir should exist");
        let first_entry = extracted_files
            .next()
            .expect("No files were extracted!")
            .expect("Failed to read directory entry");

        let extracted_blob_path = first_entry.path();
        let final_len = std::fs::metadata(&extracted_blob_path).unwrap().len();

        assert_eq!(
            final_len,
            data.len() as u64,
            "Decompressed data size does not match original size",
        );
    }
697
    // End-to-end: builds an image with three blobs, checks the JSON manifest entries, and
    // verifies that both the raw image and the unsparsed sparse image open as valid fxfs
    // filesystems containing exactly those blobs.
    #[fasync::run(10, test)]
    async fn test_make_blob_image() {
        let tmp = TempDir::new().unwrap();
        let dir = tmp.path();
        let blobs_in = {
            let write_data = |path, data: &str| {
                let mut file = File::create(&path).unwrap();
                write!(file, "{}", data).unwrap();
                let root = fuchsia_merkle::root_from_slice(data);
                (root, path)
            };
            // Two tiny blobs plus one just over 64 KiB (spanning multiple merkle leaves).
            vec![
                write_data(dir.join("stuff1.txt"), "Goodbye, stranger!"),
                write_data(dir.join("stuff2.txt"), "It's been nice!"),
                write_data(dir.join("stuff3.txt"), from_utf8(&['a' as u8; 65_537]).unwrap()),
            ]
        };

        let dir = tmp.path();
        let output_path = dir.join("fxfs.blk");
        let sparse_path = dir.join("fxfs.sparse.blk");
        let blobs_json_path = dir.join("blobs.json");
        make_blob_image(
            output_path.as_os_str().to_str().unwrap(),
            Some(sparse_path.as_os_str().to_str().unwrap()),
            blobs_in,
            blobs_json_path.as_os_str().to_str().unwrap(),
            None,
            Some(CompressionAlgorithm::Zstd),
        )
        .await
        .expect("make_blob_image failed");

        let mut blobs_json = std::fs::OpenOptions::new()
            .read(true)
            .open(blobs_json_path)
            .expect("Failed to open blob manifest");
        let mut blobs: BlobsJsonOutput =
            serde_json::from_reader(&mut blobs_json).expect("Failed to serialize to JSON output");

        assert_eq!(blobs.len(), 3);
        // Sort so assertions are independent of install order.
        blobs.sort_by_key(|entry| entry.source_path.clone());

        assert_eq!(Path::new(blobs[0].source_path.as_str()), dir.join("stuff1.txt"));
        assert_matches!(
            &blobs[0],
            BlobsJsonOutputEntry {
                merkle,
                bytes: 18,
                size: 4096,
                file_size: 18,
                merkle_tree_size: 0,
                used_space_in_blobfs: 4096,
                ..
            } if merkle == "9a24fe2fb8da617f39d303750bbe23f4e03a8b5f4d52bc90b2e5e9e44daddb3a"
        );
        assert_eq!(Path::new(blobs[1].source_path.as_str()), dir.join("stuff2.txt"));
        assert_matches!(
            &blobs[1],
            BlobsJsonOutputEntry {
                merkle,
                bytes: 15,
                size: 4096,
                file_size: 15,
                merkle_tree_size: 0,
                used_space_in_blobfs: 4096,
                ..
            } if merkle == "deebe5d5a0a42a51a293b511d0368e6f2b4da522ee0f05c6ae728c77d904f916"
        );
        assert_eq!(Path::new(blobs[2].source_path.as_str()), dir.join("stuff3.txt"));
        assert_matches!(
            &blobs[2],
            BlobsJsonOutputEntry {
                merkle,
                bytes: 65537,
                size: 8192,
                file_size: 65537,
                merkle_tree_size: 308,
                used_space_in_blobfs: 8192,
                ..
            } if merkle == "1194c76d2d3b61f29df97a85ede7b2fd2b293b452f53072356e3c5c939c8131d"
        );

        // Unsparse the sparse image so it can be compared to, and opened like, the raw image.
        let unsparsed_image = {
            let sparse_image = std::fs::OpenOptions::new().read(true).open(sparse_path).unwrap();
            let mut reader = SparseReader::new(sparse_image).expect("Failed to parse sparse image");

            let unsparsed_image_path = dir.join("fxfs.unsparsed.blk");
            let mut unsparsed_image = std::fs::OpenOptions::new()
                .read(true)
                .write(true)
                .create(true)
                .open(unsparsed_image_path)
                .unwrap();

            std::io::copy(&mut reader, &mut unsparsed_image).expect("Failed to unsparse");
            unsparsed_image.seek(SeekFrom::Start(0)).unwrap();
            unsparsed_image
        };

        let orig_image = std::fs::OpenOptions::new()
            .read(true)
            .open(output_path.clone())
            .expect("Failed to open image");

        assert_eq!(unsparsed_image.metadata().unwrap().len(), orig_image.metadata().unwrap().len());

        // Both images should contain the same set of blobs, listed by merkle root.
        for image in [orig_image, unsparsed_image] {
            let device = DeviceHolder::new(FileBackedDevice::new(image, 4096));
            let filesystem = FxFilesystem::open(device).await.unwrap();
            let root_volume = root_volume(filesystem.clone()).await.expect("Opening root volume");
            let vol =
                root_volume.volume("blob", StoreOptions::default()).await.expect("Opening volume");
            let directory = Directory::open(&vol, vol.root_directory_object_id())
                .await
                .expect("Opening root dir");
            let entries = {
                let layer_set = directory.store().tree().layer_set();
                let mut merger = layer_set.merger();
                let mut iter = directory.iter(&mut merger).await.expect("iter failed");
                let mut entries = vec![];
                while let Some((name, _, _)) = iter.get() {
                    entries.push(name.to_string());
                    iter.advance().await.expect("advance failed");
                }
                entries
            };
            assert_eq!(
                &entries[..],
                &[
                    "1194c76d2d3b61f29df97a85ede7b2fd2b293b452f53072356e3c5c939c8131d",
                    "9a24fe2fb8da617f39d303750bbe23f4e03a8b5f4d52bc90b2e5e9e44daddb3a",
                    "deebe5d5a0a42a51a293b511d0368e6f2b4da522ee0f05c6ae728c77d904f916",
                ]
            );
        }
    }
839
    // Builds the same (compressible) blob twice — once with zstd, once uncompressed — and checks
    // that disabling compression produces a strictly larger image file.
    #[fasync::run(10, test)]
    async fn test_make_uncompressed_blob_image() {
        let tmp = TempDir::new().unwrap();
        let dir = tmp.path();
        let path = dir.join("large_blob.txt");
        let mut file = File::create(&path).unwrap();
        let data = vec![0xabu8; 32 * 1024 * 1024];
        file.write_all(&data).unwrap();
        let root = fuchsia_merkle::root_from_slice(&data);
        let blobs_in = vec![(root, path)];

        let compressed_path = dir.join("fxfs-compressed.blk");
        let blobs_json_path = dir.join("blobs.json");
        make_blob_image(
            compressed_path.as_os_str().to_str().unwrap(),
            None,
            blobs_in.clone(),
            blobs_json_path.as_os_str().to_str().unwrap(),
            None,
            Some(CompressionAlgorithm::Zstd),
        )
        .await
        .expect("make_blob_image failed");

        let uncompressed_path = dir.join("fxfs-uncompressed.blk");
        make_blob_image(
            uncompressed_path.as_os_str().to_str().unwrap(),
            None,
            blobs_in,
            blobs_json_path.as_os_str().to_str().unwrap(),
            None,
            None,
        )
        .await
        .expect("make_blob_image failed");

        assert!(
            std::fs::metadata(compressed_path).unwrap().len()
                < std::fs::metadata(uncompressed_path).unwrap().len()
        )
    }
881
    // When an explicit target size is given, the raw image must be exactly that size while the
    // sparse image (which elides the unused tail) stays smaller.
    #[fasync::run(10, test)]
    async fn test_make_blob_image_with_target_size() {
        const TARGET_SIZE: u64 = 200 * 1024 * 1024;
        let tmp = TempDir::new().unwrap();
        let dir = tmp.path();
        let path = dir.join("large_blob.txt");
        let mut file = File::create(&path).unwrap();
        let data = vec![0xabu8; 8 * 1024 * 1024];
        file.write_all(&data).unwrap();
        let root = fuchsia_merkle::root_from_slice(&data);
        let blobs_in = vec![(root, path)];

        let image_path = dir.join("fxfs.blk");
        let sparse_image_path = dir.join("fxfs.sparse.blk");
        let blobs_json_path = dir.join("blobs.json");
        make_blob_image(
            image_path.as_os_str().to_str().unwrap(),
            Some(sparse_image_path.as_os_str().to_str().unwrap()),
            blobs_in.clone(),
            blobs_json_path.as_os_str().to_str().unwrap(),
            Some(200 * 1024 * 1024),
            Some(CompressionAlgorithm::Zstd),
        )
        .await
        .expect("make_blob_image failed");

        let image_size = std::fs::metadata(image_path).unwrap().len();
        let sparse_image_size = std::fs::metadata(sparse_image_path).unwrap().len();
        assert_eq!(image_size, TARGET_SIZE);
        assert!(sparse_image_size < TARGET_SIZE, "Sparse image size: {sparse_image_size}");
    }
915}