use crate::compression::{ChunkedArchive, ChunkedArchiveOptions, ChunkedDecompressor};
use crate::format::SerializedType1Blob;
use serde::{Deserialize, Serialize};
use static_assertions::assert_eq_size;
use thiserror::Error;
use zerocopy::{IntoBytes, Ref};

pub mod compression;
mod format;

// This crate assumes a 64-bit target: sizes and offsets are converted between `usize` and `u64`.
assert_eq_size!(usize, u64);

/// Prefix prepended to a blob's name to form its delivery path (see [`delivery_blob_path`]).
pub const DELIVERY_PATH_PREFIX: &str = "v1-";

/// Generate a delivery blob of type `delivery_type` for `data` using default parameters.
/// Panics if `delivery_type` is not a supported type.
pub fn generate(delivery_type: DeliveryBlobType, data: &[u8]) -> Vec<u8> {
    match delivery_type {
        DeliveryBlobType::Type1 => Type1Blob::generate(data, CompressionMode::Attempt),
        _ => panic!("Unsupported delivery blob type: {:?}", delivery_type),
    }
}

/// Generate a delivery blob of type `delivery_type` for `data` using default parameters,
/// writing the blob to `writer`. Panics if `delivery_type` is not a supported type.
pub fn generate_to(
    delivery_type: DeliveryBlobType,
    data: &[u8],
    writer: impl std::io::Write,
) -> Result<(), std::io::Error> {
    match delivery_type {
        DeliveryBlobType::Type1 => Type1Blob::generate_to(data, CompressionMode::Attempt, writer),
        _ => panic!("Unsupported delivery blob type: {:?}", delivery_type),
    }
}

/// Return the decompressed size of `delivery_blob` without decompressing its payload. Fails
/// with [`DecompressError::NeedMoreData`] if the buffer is too short to determine the size.
pub fn decompressed_size(delivery_blob: &[u8]) -> Result<u64, DecompressError> {
    let header = DeliveryBlobHeader::parse(delivery_blob)?.ok_or(DecompressError::NeedMoreData)?;
    match header.delivery_type {
        DeliveryBlobType::Type1 => Type1Blob::decompressed_size(delivery_blob),
        _ => Err(DecompressError::DeliveryBlob(DeliveryBlobError::InvalidType)),
    }
}

/// Return the decompressed size of the delivery blob produced by `reader`, reading in 4 KiB
/// increments until enough data is available to determine the size.
pub fn decompressed_size_from_reader(
    mut reader: impl std::io::Read,
) -> Result<u64, DecompressError> {
    let mut buf = vec![];
    loop {
        let already_read = buf.len();
        let new_size = already_read + 4096;
        buf.resize(new_size, 0);
        let new_size = already_read + reader.read(&mut buf[already_read..new_size])?;
        if new_size == already_read {
            // The reader is exhausted but we still haven't seen enough data to parse the blob.
            return Err(DecompressError::NeedMoreData);
        }
        buf.truncate(new_size);
        match decompressed_size(&buf) {
            Ok(size) => {
                return Ok(size);
            }
            Err(DecompressError::NeedMoreData) => {}
            Err(e) => {
                return Err(e);
            }
        }
    }
}

/// Decompress `delivery_blob` and return its contents.
pub fn decompress(delivery_blob: &[u8]) -> Result<Vec<u8>, DecompressError> {
    let header = DeliveryBlobHeader::parse(delivery_blob)?.ok_or(DecompressError::NeedMoreData)?;
    match header.delivery_type {
        DeliveryBlobType::Type1 => Type1Blob::decompress(delivery_blob),
        _ => Err(DecompressError::DeliveryBlob(DeliveryBlobError::InvalidType)),
    }
}

/// Decompress `delivery_blob`, writing the decompressed contents to `writer`.
pub fn decompress_to(
    delivery_blob: &[u8],
    writer: impl std::io::Write,
) -> Result<(), DecompressError> {
    let header = DeliveryBlobHeader::parse(delivery_blob)?.ok_or(DecompressError::NeedMoreData)?;
    match header.delivery_type {
        DeliveryBlobType::Type1 => Type1Blob::decompress_to(delivery_blob, writer),
        _ => Err(DecompressError::DeliveryBlob(DeliveryBlobError::InvalidType)),
    }
}

/// Calculate the Merkle root digest of `delivery_blob` by streaming the decompressed contents
/// into a Merkle tree builder, without buffering the full decompressed payload.
pub fn calculate_digest(delivery_blob: &[u8]) -> Result<fuchsia_merkle::Hash, DecompressError> {
    let mut writer = fuchsia_merkle::BufferedMerkleRootBuilder::default();
    let header = DeliveryBlobHeader::parse(delivery_blob)?.ok_or(DecompressError::NeedMoreData)?;
    match header.delivery_type {
        DeliveryBlobType::Type1 => {
            let () = Type1Blob::decompress_to(delivery_blob, &mut writer)?;
        }
        _ => return Err(DecompressError::DeliveryBlob(DeliveryBlobError::InvalidType)),
    }
    Ok(writer.complete())
}

/// Return the delivery path for `blob_name`: the name prefixed with [`DELIVERY_PATH_PREFIX`].
pub fn delivery_blob_path(blob_name: impl std::fmt::Display) -> String {
    format!("{}{}", DELIVERY_PATH_PREFIX, blob_name)
}

/// Errors that can occur when parsing or validating a delivery blob.
#[derive(Clone, Copy, Debug, Eq, Error, PartialEq)]
pub enum DeliveryBlobError {
    #[error("Invalid or unsupported delivery blob type.")]
    InvalidType,

    #[error("Delivery blob header has incorrect magic.")]
    BadMagic,

    #[error("Integrity/checksum or other validity checks failed.")]
    IntegrityError,
}

/// Errors that can occur when decompressing a delivery blob.
#[derive(Debug, Error)]
pub enum DecompressError {
    #[error("DeliveryBlob error")]
    DeliveryBlob(#[from] DeliveryBlobError),

    #[error("ChunkedArchive error")]
    ChunkedArchive(#[from] compression::ChunkedArchiveError),

    #[error("Need more data")]
    NeedMoreData,

    #[error("io error")]
    IoError(#[from] std::io::Error),
}

#[cfg(target_os = "fuchsia")]
impl From<DeliveryBlobError> for zx::Status {
    fn from(value: DeliveryBlobError) -> Self {
        match value {
            DeliveryBlobError::InvalidType => zx::Status::NOT_SUPPORTED,
            DeliveryBlobError::BadMagic | DeliveryBlobError::IntegrityError => {
                zx::Status::IO_DATA_INTEGRITY
            }
        }
    }
}

/// Header found at the start of every delivery blob, identifying its type and header length.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub struct DeliveryBlobHeader {
    pub delivery_type: DeliveryBlobType,
    pub header_length: u32,
}

impl DeliveryBlobHeader {
    /// Attempt to parse `data` as a delivery blob header. Returns `Ok(None)` if `data` is too
    /// short to contain a full header, or an error if the header fails validation.
    pub fn parse(data: &[u8]) -> Result<Option<DeliveryBlobHeader>, DeliveryBlobError> {
        let Ok((serialized_header, _metadata_and_payload)) =
            Ref::<_, format::SerializedHeader>::from_prefix(data)
        else {
            return Ok(None);
        };
        serialized_header.decode().map(Some)
    }
}

/// Type of a delivery blob, which determines how the payload is encoded.
#[derive(Clone, Copy, Debug, Eq, PartialEq, Serialize, Deserialize)]
#[repr(u32)]
pub enum DeliveryBlobType {
    /// Reserved type; not valid for generated blobs.
    Reserved = 0,
    /// Type 1: payload is either uncompressed data or a compressed chunked archive.
    Type1 = 1,
}

impl TryFrom<u32> for DeliveryBlobType {
    type Error = DeliveryBlobError;
    fn try_from(value: u32) -> Result<Self, Self::Error> {
        match value {
            value if value == DeliveryBlobType::Reserved as u32 => Ok(DeliveryBlobType::Reserved),
            value if value == DeliveryBlobType::Type1 as u32 => Ok(DeliveryBlobType::Type1),
            _ => Err(DeliveryBlobError::InvalidType),
        }
    }
}

impl From<DeliveryBlobType> for u32 {
    fn from(value: DeliveryBlobType) -> Self {
        value as u32
    }
}

/// Compression mode to use when generating a delivery blob.
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub enum CompressionMode {
    /// Never compress the payload.
    Never,
    /// Compress the payload, but fall back to storing it uncompressed if the compressed form
    /// would be larger than the original data.
    Attempt,
    /// Always compress the payload, even if the compressed form is larger.
    Always,
}

/// Metadata of a Type 1 delivery blob. The payload that follows the header is either the raw
/// data or a compressed chunked archive, as indicated by `is_compressed`.
#[derive(Clone, Copy, Debug)]
pub struct Type1Blob {
    pub header: DeliveryBlobHeader,
    /// Length of the payload in bytes (the compressed size when `is_compressed` is true).
    pub payload_length: usize,
    /// Whether the payload is a compressed chunked archive.
    pub is_compressed: bool,
}

impl Type1Blob {
    /// Header used for all Type 1 delivery blobs.
    pub const HEADER: DeliveryBlobHeader = DeliveryBlobHeader {
        delivery_type: DeliveryBlobType::Type1,
        header_length: std::mem::size_of::<SerializedType1Blob>() as u32,
    };

    /// Compression parameters used when generating Type 1 blobs.
    pub const CHUNKED_ARCHIVE_OPTIONS: ChunkedArchiveOptions = ChunkedArchiveOptions::V2 {
        chunk_alignment: fuchsia_merkle::BLOCK_SIZE,
        minimum_chunk_size: 32 * 1024,
        compression_level: 14,
    };

    /// Generate a Type 1 delivery blob for `data` using the specified [`CompressionMode`].
    pub fn generate(data: &[u8], mode: CompressionMode) -> Vec<u8> {
        let mut delivery_blob: Vec<u8> = vec![];
        // Writing into a Vec cannot fail, so the unwrap is safe.
        Self::generate_to(data, mode, &mut delivery_blob).unwrap();
        delivery_blob
    }

    /// Generate a Type 1 delivery blob for `data` using the specified [`CompressionMode`],
    /// writing the blob to `writer`.
    pub fn generate_to(
        data: &[u8],
        mode: CompressionMode,
        mut writer: impl std::io::Write,
    ) -> Result<(), std::io::Error> {
        // Compress the data if requested, discarding the result in `Attempt` mode if the
        // compressed payload would be larger than the original data.
        let compressed = match mode {
            CompressionMode::Attempt | CompressionMode::Always => {
                let compressed = ChunkedArchive::new(data, Self::CHUNKED_ARCHIVE_OPTIONS)
                    .expect("failed to compress data");
                if mode == CompressionMode::Always || compressed.serialized_size() <= data.len() {
                    Some(compressed)
                } else {
                    None
                }
            }
            CompressionMode::Never => None,
        };

        // Write the serialized header followed by the (possibly compressed) payload.
        let payload_length =
            compressed.as_ref().map(|archive| archive.serialized_size()).unwrap_or(data.len());
        let header =
            Self { header: Type1Blob::HEADER, payload_length, is_compressed: compressed.is_some() };
        let serialized_header: SerializedType1Blob = header.into();
        writer.write_all(serialized_header.as_bytes())?;

        if let Some(archive) = compressed {
            archive.write(writer)?;
        } else {
            writer.write_all(data)?;
        }
        Ok(())
    }

    /// Attempt to parse `data` as a Type 1 delivery blob, returning the metadata and remaining
    /// payload bytes. Returns `Ok(None)` if `data` is too short to contain a full header.
    pub fn parse(data: &[u8]) -> Result<Option<(Type1Blob, &[u8])>, DeliveryBlobError> {
        let Ok((serialized_header, payload)) = Ref::<_, SerializedType1Blob>::from_prefix(data)
        else {
            return Ok(None);
        };
        serialized_header.decode().map(|metadata| Some((metadata, payload)))
    }

    /// Return the decompressed size of the blob without decompressing it. For compressed blobs
    /// this requires enough of `delivery_blob` to decode the chunked archive metadata.
    pub fn decompressed_size(delivery_blob: &[u8]) -> Result<u64, DecompressError> {
        let (header, payload) = Self::parse(delivery_blob)?.ok_or(DecompressError::NeedMoreData)?;
        if !header.is_compressed {
            return Ok(header.payload_length as u64);
        }

        let (decoded_archive, _chunk_data) =
            compression::decode_archive(payload, header.payload_length)?
                .ok_or(DecompressError::NeedMoreData)?;
        Ok(decoded_archive.decompressed_size() as u64)
    }

    /// Decompress the blob and return its contents.
    pub fn decompress(delivery_blob: &[u8]) -> Result<Vec<u8>, DecompressError> {
        let mut decompressed =
            Vec::with_capacity(Self::decompressed_size(delivery_blob)? as usize);
        Self::decompress_to(delivery_blob, &mut decompressed)?;
        Ok(decompressed)
    }

    /// Decompress the blob, writing the decompressed contents to `writer`.
    pub fn decompress_to(
        delivery_blob: &[u8],
        mut writer: impl std::io::Write,
    ) -> Result<(), DecompressError> {
        let (header, payload) = Self::parse(delivery_blob)?.ok_or(DecompressError::NeedMoreData)?;
        if !header.is_compressed {
            return Ok(writer.write_all(payload)?);
        }

        let (decoded_archive, chunk_data) =
            compression::decode_archive(payload, header.payload_length)?
                .ok_or(DecompressError::NeedMoreData)?;
        let mut decompressor = ChunkedDecompressor::new(decoded_archive)?;
        // Stream each decompressed chunk to `writer`; any write error is stashed in `result`
        // and returned once `update` completes.
        let mut result = Ok(());
        let mut chunk_callback = |chunk: &[u8]| {
            if let Err(e) = writer.write_all(chunk) {
                result = Err(e.into());
            }
        };
        decompressor.update(chunk_data, &mut chunk_callback)?;
        result
    }
}

#[cfg(test)]
mod tests {

    use super::*;
    use rand::Rng;

    const DATA_LEN: usize = 500_000;

    #[test]
    fn compression_mode_never() {
        let data: Vec<u8> = vec![0; DATA_LEN];
        let delivery_blob = Type1Blob::generate(&data, CompressionMode::Never);
        let (header, _) = Type1Blob::parse(&delivery_blob).unwrap().unwrap();
        assert!(!header.is_compressed);
        assert_eq!(header.payload_length, data.len());
        assert_eq!(Type1Blob::decompress(&delivery_blob).unwrap(), data);
    }
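
    // Minimal sketch: the top-level `generate`/`decompress` wrappers dispatch to `Type1Blob`,
    // so a generated blob should round-trip through them. The zero-filled input is an
    // arbitrary, highly compressible example.
    #[test]
    fn top_level_generate_decompress_round_trip() {
        let data: Vec<u8> = vec![0; DATA_LEN];
        let delivery_blob = generate(DeliveryBlobType::Type1, &data);
        assert_eq!(decompress(&delivery_blob).unwrap(), data);
        assert_eq!(decompressed_size(&delivery_blob).unwrap(), DATA_LEN as u64);
    }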

    #[test]
    fn compression_mode_always() {
        let data: Vec<u8> = {
            let range = rand::distr::Uniform::<u8>::new_inclusive(0, 255).unwrap();
            rand::rng().sample_iter(&range).take(DATA_LEN).collect()
        };
        let delivery_blob = Type1Blob::generate(&data, CompressionMode::Always);
        let (header, _) = Type1Blob::parse(&delivery_blob).unwrap().unwrap();
        assert!(header.is_compressed);
        // Random data is incompressible, so forcing compression produces a larger payload.
        assert!(header.payload_length > data.len());
        assert_eq!(Type1Blob::decompress(&delivery_blob).unwrap(), data);
    }
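
    // Minimal sketch: `generate_to` writes a delivery blob directly to a writer; decompressing
    // the buffered output should recover the original data.
    #[test]
    fn generate_to_writes_decompressible_blob() {
        let data: Vec<u8> = vec![0; DATA_LEN];
        let mut buffer: Vec<u8> = vec![];
        generate_to(DeliveryBlobType::Type1, &data, &mut buffer).unwrap();
        assert_eq!(decompress(&buffer).unwrap(), data);
    }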

    #[test]
    fn compression_mode_attempt_uncompressible() {
        let data: Vec<u8> = {
            let range = rand::distr::Uniform::<u8>::new_inclusive(0, 255).unwrap();
            rand::rng().sample_iter(&range).take(DATA_LEN).collect()
        };
        let delivery_blob = Type1Blob::generate(&data, CompressionMode::Attempt);
        let (header, _) = Type1Blob::parse(&delivery_blob).unwrap().unwrap();
        assert!(!header.is_compressed);
        assert_eq!(header.payload_length, data.len());
        assert_eq!(Type1Blob::decompress(&delivery_blob).unwrap(), data);
    }
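
    // Minimal sketch: the streaming `decompress_to` helper should produce the same bytes as
    // the buffered `decompress` helper for the same blob.
    #[test]
    fn decompress_to_matches_decompress() {
        let data: Vec<u8> = vec![0; DATA_LEN];
        let delivery_blob = Type1Blob::generate(&data, CompressionMode::Attempt);
        let mut streamed: Vec<u8> = vec![];
        decompress_to(&delivery_blob, &mut streamed).unwrap();
        assert_eq!(streamed, decompress(&delivery_blob).unwrap());
    }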

    #[test]
    fn compression_mode_attempt_compressible() {
        let data: Vec<u8> = vec![0; DATA_LEN];
        let delivery_blob = Type1Blob::generate(&data, CompressionMode::Attempt);
        let (header, _) = Type1Blob::parse(&delivery_blob).unwrap().unwrap();
        assert!(header.is_compressed);
        assert!(header.payload_length < data.len());
        assert_eq!(Type1Blob::decompress(&delivery_blob).unwrap(), data);
    }
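
    // Minimal sketch: `delivery_blob_path` prepends `DELIVERY_PATH_PREFIX` to the blob name.
    // The name used here is an arbitrary example string.
    #[test]
    fn delivery_path_uses_prefix() {
        let path = delivery_blob_path("abc123");
        assert_eq!(path, "v1-abc123");
        assert!(path.starts_with(DELIVERY_PATH_PREFIX));
    }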

    #[test]
    fn get_decompressed_size() {
        let data: Vec<u8> = {
            let range = rand::distr::Uniform::<u8>::new_inclusive(0, 255).unwrap();
            rand::rng().sample_iter(&range).take(DATA_LEN).collect()
        };
        let delivery_blob = Type1Blob::generate(&data, CompressionMode::Always);
        assert_eq!(decompressed_size(&delivery_blob).unwrap(), DATA_LEN as u64);
        assert_eq!(decompressed_size_from_reader(&delivery_blob[..]).unwrap(), DATA_LEN as u64);
    }
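
    // Minimal sketch: parsing behavior on incomplete or unknown input. A buffer shorter than
    // the serialized header yields `Ok(None)` (more data is needed), and an unknown type value
    // is rejected with `InvalidType`.
    #[test]
    fn parse_incomplete_and_unknown_inputs() {
        assert_eq!(DeliveryBlobHeader::parse(&[0u8; 2]), Ok(None));
        assert_eq!(DeliveryBlobType::try_from(42u32), Err(DeliveryBlobError::InvalidType));
    }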

    #[test]
    fn test_calculate_digest() {
        let data: Vec<u8> = {
            let range = rand::distr::Uniform::<u8>::new_inclusive(0, 255).unwrap();
            rand::rng().sample_iter(&range).take(DATA_LEN).collect()
        };
        let delivery_blob = Type1Blob::generate(&data, CompressionMode::Always);
        assert_eq!(
            calculate_digest(&delivery_blob).unwrap(),
            fuchsia_merkle::root_from_slice(&data)
        );
    }
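
    // Minimal sketch: `decompressed_size_from_reader` reads incrementally from any
    // `std::io::Read`, so it also works with a `Cursor` standing in for a file or socket.
    #[test]
    fn decompressed_size_from_cursor() {
        let data: Vec<u8> = vec![0; DATA_LEN];
        let delivery_blob = Type1Blob::generate(&data, CompressionMode::Attempt);
        let reader = std::io::Cursor::new(delivery_blob);
        assert_eq!(decompressed_size_from_reader(reader).unwrap(), DATA_LEN as u64);
    }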
}