//! Library for generating, parsing, and decompressing delivery blobs: blobs wrapped in a typed
//! header and an optionally compressed payload, written under the `v1-` path prefix.

use crate::compression::{ChunkedArchive, ChunkedDecompressor};
use crate::format::SerializedType1Blob;
use serde::{Deserialize, Serialize};
use static_assertions::assert_eq_size;
use thiserror::Error;
use zerocopy::{IntoBytes, Ref};
/// Chunked compression support for delivery blob payloads.
pub mod compression;
/// Serialized (on-wire) representations of delivery blob headers.
mod format;
// This library freely converts between `usize` and `u64`; require a 64-bit `usize` so those
// casts are lossless.
assert_eq_size!(usize, u64);
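
/// Prefix prepended to a blob's name to form the path of its delivery blob (see
/// [`delivery_blob_path`]).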
pub const DELIVERY_PATH_PREFIX: &str = "v1-";
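
/// Generate a delivery blob of the specified `delivery_type` for `data`.
///
/// Illustrative sketch (assumes this crate is available as `delivery_blob`):
///
/// ```ignore
/// use delivery_blob::{generate, DeliveryBlobType};
///
/// let blob: Vec<u8> = generate(DeliveryBlobType::Type1, b"hello world");
/// ```
///
/// # Panics
///
/// Panics if `delivery_type` is not a supported delivery blob type.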
pub fn generate(delivery_type: DeliveryBlobType, data: &[u8]) -> Vec<u8> {
match delivery_type {
DeliveryBlobType::Type1 => Type1Blob::generate(data, CompressionMode::Attempt),
_ => panic!("Unsupported delivery blob type: {:?}", delivery_type),
}
}
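
/// Generate a delivery blob of the specified `delivery_type` for `data`, writing it directly to
/// `writer` instead of returning it.
///
/// # Panics
///
/// Panics if `delivery_type` is not a supported delivery blob type.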
pub fn generate_to(
delivery_type: DeliveryBlobType,
data: &[u8],
writer: impl std::io::Write,
) -> Result<(), std::io::Error> {
match delivery_type {
DeliveryBlobType::Type1 => Type1Blob::generate_to(data, CompressionMode::Attempt, writer),
_ => panic!("Unsupported delivery blob type: {:?}", delivery_type),
}
}
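
/// Return the decompressed size of `delivery_blob` without decompressing it. For compressed
/// Type 1 blobs, only the header and seek table are required, not the full payload.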
pub fn decompressed_size(delivery_blob: &[u8]) -> Result<u64, DecompressError> {
let header = DeliveryBlobHeader::parse(delivery_blob)?.ok_or(DecompressError::NeedMoreData)?;
match header.delivery_type {
DeliveryBlobType::Type1 => Type1Blob::decompressed_size(delivery_blob),
_ => Err(DecompressError::DeliveryBlob(DeliveryBlobError::InvalidType)),
}
}
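
/// Like [`decompressed_size`], but reads the delivery blob from `reader` in 4 KiB increments
/// until enough data is available to determine the decompressed size.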
pub fn decompressed_size_from_reader(
mut reader: impl std::io::Read,
) -> Result<u64, DecompressError> {
let mut buf = vec![];
loop {
        // Grow the buffer 4 KiB at a time until enough of the blob has been read to determine
        // its decompressed size.
        let already_read = buf.len();
        buf.resize(already_read + 4096, 0);
        let bytes_read = reader.read(&mut buf[already_read..])?;
        if bytes_read == 0 {
            // EOF was reached before enough data was available.
            return Err(DecompressError::NeedMoreData);
        }
        buf.truncate(already_read + bytes_read);
match decompressed_size(&buf) {
Ok(size) => {
return Ok(size);
}
Err(DecompressError::NeedMoreData) => {}
Err(e) => {
return Err(e);
}
}
}
}
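
/// Decompress `delivery_blob` and return the blob contents.
///
/// Illustrative round trip (assumes this crate is available as `delivery_blob`):
///
/// ```ignore
/// use delivery_blob::{decompress, generate, DeliveryBlobType};
///
/// let data = b"hello world";
/// let blob = generate(DeliveryBlobType::Type1, data);
/// assert_eq!(decompress(&blob).unwrap(), data);
/// ```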
pub fn decompress(delivery_blob: &[u8]) -> Result<Vec<u8>, DecompressError> {
let header = DeliveryBlobHeader::parse(delivery_blob)?.ok_or(DecompressError::NeedMoreData)?;
match header.delivery_type {
DeliveryBlobType::Type1 => Type1Blob::decompress(delivery_blob),
_ => Err(DecompressError::DeliveryBlob(DeliveryBlobError::InvalidType)),
}
}
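
/// Decompress `delivery_blob`, writing the blob contents to `writer`.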
pub fn decompress_to(
delivery_blob: &[u8],
writer: impl std::io::Write,
) -> Result<(), DecompressError> {
let header = DeliveryBlobHeader::parse(delivery_blob)?.ok_or(DecompressError::NeedMoreData)?;
match header.delivery_type {
DeliveryBlobType::Type1 => Type1Blob::decompress_to(delivery_blob, writer),
_ => Err(DecompressError::DeliveryBlob(DeliveryBlobError::InvalidType)),
}
}
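
/// Calculate the Merkle root digest of the decompressed contents of `delivery_blob`. The
/// contents are streamed into a [`fuchsia_merkle::MerkleTreeWriter`], so no buffer proportional
/// to the decompressed size is allocated.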
pub fn calculate_digest(delivery_blob: &[u8]) -> Result<fuchsia_merkle::Hash, DecompressError> {
let mut writer = fuchsia_merkle::MerkleTreeWriter::new(std::io::sink());
let header = DeliveryBlobHeader::parse(delivery_blob)?.ok_or(DecompressError::NeedMoreData)?;
match header.delivery_type {
DeliveryBlobType::Type1 => {
let () = Type1Blob::decompress_to(delivery_blob, &mut writer)?;
}
_ => return Err(DecompressError::DeliveryBlob(DeliveryBlobError::InvalidType)),
}
Ok(writer.finish().root())
}
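
/// Return the file path for a delivery blob: `blob_name` prefixed with
/// [`DELIVERY_PATH_PREFIX`].
///
/// ```ignore
/// assert_eq!(delivery_blob_path("abc123"), "v1-abc123");
/// ```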
pub fn delivery_blob_path(blob_name: impl std::fmt::Display) -> String {
format!("{}{}", DELIVERY_PATH_PREFIX, blob_name)
}
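
/// Errors that can occur when parsing or validating a delivery blob.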
#[derive(Clone, Copy, Debug, Eq, Error, PartialEq)]
pub enum DeliveryBlobError {
#[error("Invalid or unsupported delivery blob type.")]
InvalidType,
#[error("Delivery blob header has incorrect magic.")]
BadMagic,
#[error("Integrity/checksum or other validity checks failed.")]
IntegrityError,
}
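
/// Errors that can occur when decompressing a delivery blob.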
#[derive(Debug, Error)]
pub enum DecompressError {
#[error("DeliveryBlob error")]
DeliveryBlob(#[from] DeliveryBlobError),
#[error("ChunkedArchive error")]
ChunkedArchive(#[from] compression::ChunkedArchiveError),
#[error("Need more data")]
NeedMoreData,
#[error("io error")]
IoError(#[from] std::io::Error),
}
#[cfg(target_os = "fuchsia")]
impl From<DeliveryBlobError> for zx::Status {
fn from(value: DeliveryBlobError) -> Self {
match value {
DeliveryBlobError::InvalidType => zx::Status::NOT_SUPPORTED,
DeliveryBlobError::BadMagic | DeliveryBlobError::IntegrityError => {
zx::Status::IO_DATA_INTEGRITY
}
}
}
}
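
/// Header prefixed to all delivery blobs, identifying the delivery type and the overall length
/// of the type-specific header.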
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub struct DeliveryBlobHeader {
    /// Type of this delivery blob, which determines how the payload is interpreted.
    pub delivery_type: DeliveryBlobType,
    /// Total length of the serialized header, including any type-specific metadata that follows
    /// this structure.
    pub header_length: u32,
}
impl DeliveryBlobHeader {
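    /// Attempt to parse `data` as a delivery blob header. Returns `Ok(None)` if `data` is too
    /// short to contain a full header, and an error if the header fails validation.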
pub fn parse(data: &[u8]) -> Result<Option<DeliveryBlobHeader>, DeliveryBlobError> {
let Ok((serialized_header, _metadata_and_payload)) =
Ref::<_, format::SerializedHeader>::from_prefix(data)
else {
return Ok(None);
};
serialized_header.decode().map(Some)
}
}
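
/// Type of a delivery blob, encoded as a `u32` in the blob's header.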
#[derive(Clone, Copy, Debug, Eq, PartialEq, Serialize, Deserialize)]
#[repr(u32)]
pub enum DeliveryBlobType {
    /// Reserved type; not a valid delivery blob type for generation.
    Reserved = 0,
    /// Type 1 delivery blobs, which support an optionally compressed payload.
    Type1 = 1,
}
impl TryFrom<u32> for DeliveryBlobType {
type Error = DeliveryBlobError;
fn try_from(value: u32) -> Result<Self, Self::Error> {
match value {
value if value == DeliveryBlobType::Reserved as u32 => Ok(DeliveryBlobType::Reserved),
value if value == DeliveryBlobType::Type1 as u32 => Ok(DeliveryBlobType::Type1),
_ => Err(DeliveryBlobError::InvalidType),
}
}
}
impl From<DeliveryBlobType> for u32 {
fn from(value: DeliveryBlobType) -> Self {
value as u32
}
}
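
/// Mode specifying whether the payload of a generated delivery blob should be compressed.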
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub enum CompressionMode {
    /// Never compress the payload, even if compression would reduce its size.
    Never,
    /// Compress the payload, but fall back to the uncompressed data if compression does not
    /// reduce its size.
    Attempt,
    /// Always compress the payload, even if compression increases its size.
    Always,
}
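
/// Parsed header and metadata of a Type 1 delivery blob. The payload that follows is either the
/// raw blob contents or a zstd-chunked compressed archive of them.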
#[derive(Clone, Copy, Debug)]
pub struct Type1Blob {
    /// Common delivery blob header (always [`Type1Blob::HEADER`]).
    pub header: DeliveryBlobHeader,
    /// Length of the payload that follows the header, in bytes.
    pub payload_length: usize,
    /// Whether the payload is a compressed archive or the raw, uncompressed blob contents.
    pub is_compressed: bool,
}
impl Type1Blob {
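    /// Header common to all Type 1 delivery blobs.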
pub const HEADER: DeliveryBlobHeader = DeliveryBlobHeader {
delivery_type: DeliveryBlobType::Type1,
header_length: std::mem::size_of::<SerializedType1Blob>() as u32,
};
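
    /// Alignment of compressed chunks, matching the Merkle tree block size.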
const CHUNK_ALIGNMENT: usize = fuchsia_merkle::BLOCK_SIZE;
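
    /// Generate a Type 1 delivery blob for `data` using the given compression `mode`.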
pub fn generate(data: &[u8], mode: CompressionMode) -> Vec<u8> {
let mut delivery_blob: Vec<u8> = vec![];
Self::generate_to(data, mode, &mut delivery_blob).unwrap();
delivery_blob
}
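
    /// Generate a Type 1 delivery blob for `data` using the given compression `mode`, writing
    /// it directly to `writer`.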
pub fn generate_to(
data: &[u8],
mode: CompressionMode,
mut writer: impl std::io::Write,
) -> Result<(), std::io::Error> {
        // Compress when requested. Under `Attempt`, only use the compressed archive if it is no
        // larger than the raw data; under `Always`, use it unconditionally.
        let compressed = match mode {
CompressionMode::Attempt | CompressionMode::Always => {
let compressed = ChunkedArchive::new(data, Self::CHUNK_ALIGNMENT)
.expect("failed to compress data");
if mode == CompressionMode::Always || compressed.serialized_size() <= data.len() {
Some(compressed)
} else {
None
}
}
CompressionMode::Never => None,
};
let payload_length =
compressed.as_ref().map(|archive| archive.serialized_size()).unwrap_or(data.len());
let header =
Self { header: Type1Blob::HEADER, payload_length, is_compressed: compressed.is_some() };
let serialized_header: SerializedType1Blob = header.into();
writer.write_all(serialized_header.as_bytes())?;
if let Some(archive) = compressed {
archive.write(writer)?;
} else {
writer.write_all(data)?;
}
Ok(())
}
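
    /// Attempt to parse `data` as a Type 1 delivery blob. On success, returns the parsed
    /// metadata and the payload (the remainder of `data`). Returns `Ok(None)` if `data` is too
    /// short to contain a full header, and an error if the header fails validation.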
pub fn parse(data: &[u8]) -> Result<Option<(Type1Blob, &[u8])>, DeliveryBlobError> {
let Ok((serialized_header, payload)) = Ref::<_, SerializedType1Blob>::from_prefix(data)
else {
return Ok(None);
};
serialized_header.decode().map(|metadata| Some((metadata, payload)))
}
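
    /// Return the decompressed size of the blob without decompressing it. For compressed blobs,
    /// only the header and seek table of `delivery_blob` are required.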
pub fn decompressed_size(delivery_blob: &[u8]) -> Result<u64, DecompressError> {
let (header, payload) = Self::parse(delivery_blob)?.ok_or(DecompressError::NeedMoreData)?;
if !header.is_compressed {
return Ok(header.payload_length as u64);
}
let (seek_table, _chunk_data) =
compression::decode_archive(payload, header.payload_length)?
.ok_or(DecompressError::NeedMoreData)?;
Ok(seek_table.into_iter().map(|chunk| chunk.decompressed_range.len() as u64).sum())
}
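
    /// Decompress the payload of `delivery_blob` and return the blob contents.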
pub fn decompress(delivery_blob: &[u8]) -> Result<Vec<u8>, DecompressError> {
        let mut decompressed =
            Vec::with_capacity(Self::decompressed_size(delivery_blob)? as usize);
Self::decompress_to(delivery_blob, &mut decompressed)?;
Ok(decompressed)
}
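
    /// Decompress the payload of `delivery_blob`, streaming the blob contents to `writer` one
    /// chunk at a time.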
pub fn decompress_to(
delivery_blob: &[u8],
mut writer: impl std::io::Write,
) -> Result<(), DecompressError> {
let (header, payload) = Self::parse(delivery_blob)?.ok_or(DecompressError::NeedMoreData)?;
if !header.is_compressed {
return Ok(writer.write_all(payload)?);
}
let (seek_table, chunk_data) = compression::decode_archive(payload, header.payload_length)?
.ok_or(DecompressError::NeedMoreData)?;
let mut decompressor = ChunkedDecompressor::new(seek_table)?;
let mut result = Ok(());
let mut chunk_callback = |chunk: &[u8]| {
if let Err(e) = writer.write_all(chunk) {
result = Err(e.into());
}
};
decompressor.update(chunk_data, &mut chunk_callback)?;
result
}
}
#[cfg(test)]
mod tests {
    use super::*;
    use rand::Rng;

    const DATA_LEN: usize = 500_000;

    /// Returns `DATA_LEN` bytes of uniformly random (and thus incompressible) data.
    fn random_data() -> Vec<u8> {
        let range = rand::distributions::Uniform::<u8>::new_inclusive(0, 255);
        rand::thread_rng().sample_iter(&range).take(DATA_LEN).collect()
    }
#[test]
fn compression_mode_never() {
let data: Vec<u8> = vec![0; DATA_LEN];
let delivery_blob = Type1Blob::generate(&data, CompressionMode::Never);
let (header, _) = Type1Blob::parse(&delivery_blob).unwrap().unwrap();
assert!(!header.is_compressed);
assert_eq!(header.payload_length, data.len());
assert_eq!(Type1Blob::decompress(&delivery_blob).unwrap(), data);
}
#[test]
fn compression_mode_always() {
        let data = random_data();
let delivery_blob = Type1Blob::generate(&data, CompressionMode::Always);
let (header, _) = Type1Blob::parse(&delivery_blob).unwrap().unwrap();
assert!(header.is_compressed);
        // Random data is incompressible, so the compressed payload exceeds the input size.
        assert!(header.payload_length > data.len());
assert_eq!(Type1Blob::decompress(&delivery_blob).unwrap(), data);
}
#[test]
fn compression_mode_attempt_uncompressible() {
        let data = random_data();
let delivery_blob = Type1Blob::generate(&data, CompressionMode::Attempt);
let (header, _) = Type1Blob::parse(&delivery_blob).unwrap().unwrap();
assert!(!header.is_compressed);
assert_eq!(header.payload_length, data.len());
assert_eq!(Type1Blob::decompress(&delivery_blob).unwrap(), data);
}
#[test]
fn compression_mode_attempt_compressible() {
let data: Vec<u8> = vec![0; DATA_LEN];
let delivery_blob = Type1Blob::generate(&data, CompressionMode::Attempt);
let (header, _) = Type1Blob::parse(&delivery_blob).unwrap().unwrap();
assert!(header.is_compressed);
assert!(header.payload_length < data.len());
assert_eq!(Type1Blob::decompress(&delivery_blob).unwrap(), data);
}
#[test]
fn get_decompressed_size() {
        let data = random_data();
let delivery_blob = Type1Blob::generate(&data, CompressionMode::Always);
assert_eq!(decompressed_size(&delivery_blob).unwrap(), DATA_LEN as u64);
assert_eq!(decompressed_size_from_reader(&delivery_blob[..]).unwrap(), DATA_LEN as u64);
}
#[test]
fn test_calculate_digest() {
        let data = random_data();
let delivery_blob = Type1Blob::generate(&data, CompressionMode::Always);
assert_eq!(
calculate_digest(&delivery_blob).unwrap(),
fuchsia_merkle::from_slice(&data).root()
);
}
}