1use crate::errors::FxfsError;
6use crate::lsm_tree::Query;
7use crate::lsm_tree::types::{ItemRef, LayerIterator};
8use crate::object_handle::ObjectHandle;
9use crate::object_store::object_record::{AttributeKey, ObjectKey, ObjectKeyData, ObjectValue};
10use crate::object_store::{
11 BLOB_MERKLE_ATTRIBUTE_ID, BLOB_METADATA_ATTRIBUTE_ID, DataObjectHandle,
12 FSVERITY_MERKLE_ATTRIBUTE_ID, HandleOwner,
13};
14use crate::serialized_types::{Versioned, VersionedLatest};
15use anyhow::{Context, Error};
16use fprint::TypeFingerprint;
17use fuchsia_merkle::{Hash, LeafHashCollector, MerkleVerifier};
18use serde::{Deserialize, Serialize};
19
/// The legacy, pre-versioning on-disk layout of blob metadata, deserialized with plain bincode.
/// Still readable so that blobs written before versioned metadata existed can be migrated on
/// read (see `From<BlobMetadataUnversioned> for BlobMetadataV53`).
#[derive(Serialize, Deserialize, Debug)]
pub struct BlobMetadataUnversioned {
    /// Merkle tree leaf hashes, one 32-byte hash per leaf.
    pub hashes: Vec<[u8; 32]>,
    /// Size of each compression chunk in bytes (only meaningful when compressed).
    pub chunk_size: u64,
    /// Byte offsets of each compressed chunk; empty means the blob is uncompressed.
    pub compressed_offsets: Vec<u64>,
    /// Total size of the blob once decompressed.
    pub uncompressed_size: u64,
}
27
/// Alias for the latest versioned blob metadata layout.
pub type BlobMetadata = BlobMetadataV53;
/// Alias for the latest versioned blob format enum.
pub type BlobFormat = BlobFormatV53;
/// The leaf hashes of a blob's merkle tree, one 32-byte hash per leaf.
pub type MerkleLeaves = Vec<[u8; 32]>;
31
impl BlobMetadata {
    /// Reads the metadata for `blob_object` from its object store.
    ///
    /// Metadata has been stored under two attributes over time:
    ///  - `BLOB_MERKLE_ATTRIBUTE_ID`: the legacy unversioned bincode encoding
    ///    (`BlobMetadataUnversioned`), migrated to the current layout on read;
    ///  - `BLOB_METADATA_ATTRIBUTE_ID`: the current versioned encoding.
    ///
    /// A single range query starting at the merkle-attribute key finds whichever of the two is
    /// present (the legacy attribute wins if both somehow exist, since it sorts first). Finding
    /// an fs-verity merkle attribute instead is treated as filesystem inconsistency; anything
    /// else (including no attribute at all) yields empty metadata.
    pub async fn read_from<S: HandleOwner>(
        blob_object: &DataObjectHandle<S>,
    ) -> Result<Self, Error> {
        let store = blob_object.store();
        let layer_set = store.tree().layer_set();
        let mut merger = layer_set.merger();
        // The match arms below rely on the legacy merkle attribute sorting before the current
        // metadata attribute so that one query starting at the merkle key covers both.
        static_assertions::const_assert!(BLOB_MERKLE_ATTRIBUTE_ID < BLOB_METADATA_ATTRIBUTE_ID);
        let key = ObjectKey::attribute(
            blob_object.object_id(),
            BLOB_MERKLE_ATTRIBUTE_ID,
            AttributeKey::Attribute,
        );
        let iter = merger.query(Query::FullRange(&key)).await?;
        match iter.get() {
            // Legacy format: deserialize the unversioned layout and migrate it.
            // Note the attribute ids in these patterns are consts, so the patterns only match
            // those exact ids.
            Some(ItemRef {
                key:
                    ObjectKey {
                        object_id,
                        data:
                            ObjectKeyData::Attribute(BLOB_MERKLE_ATTRIBUTE_ID, AttributeKey::Attribute),
                    },
                value,
                ..
            }) if *object_id == blob_object.object_id() => match value {
                ObjectValue::Attribute { .. } => {
                    let serialized_metadata = blob_object.read_attr_from_iter(iter).await?;
                    let old_metadata: BlobMetadataUnversioned =
                        bincode::deserialize_from(&*serialized_metadata)?;
                    Ok(Self::from(old_metadata))
                }
                // An attribute key must map to an attribute value.
                _ => Err(FxfsError::Inconsistent.into()),
            },
            // Current format: versioned deserialization.
            Some(ItemRef {
                key:
                    ObjectKey {
                        object_id,
                        data:
                            ObjectKeyData::Attribute(
                                BLOB_METADATA_ATTRIBUTE_ID,
                                AttributeKey::Attribute,
                            ),
                    },
                value,
                ..
            }) if *object_id == blob_object.object_id() => match value {
                ObjectValue::Attribute { .. } => {
                    let serialized_metadata = blob_object.read_attr_from_iter(iter).await?;
                    Ok(Self::deserialize_with_version(&mut &*serialized_metadata)?.0)
                }
                _ => Err(FxfsError::Inconsistent.into()),
            },
            // An fs-verity merkle attribute on a blob object is treated as inconsistency.
            Some(ItemRef {
                key:
                    ObjectKey {
                        object_id,
                        data:
                            ObjectKeyData::Attribute(
                                FSVERITY_MERKLE_ATTRIBUTE_ID,
                                AttributeKey::Attribute,
                            ),
                    },
                ..
            }) if *object_id == blob_object.object_id() => {
                Err(FxfsError::Inconsistent.into())
            }
            // No recognized metadata attribute: the blob has empty metadata.
            _ => Ok(Self::empty()),
        }
    }

    /// Writes this metadata to `blob_object` under `BLOB_METADATA_ATTRIBUTE_ID` using the
    /// versioned encoding. Empty metadata is deliberately not written at all (reads of a
    /// missing attribute produce `Self::empty()`).
    pub async fn write_to<S: HandleOwner>(
        &self,
        blob_object: &DataObjectHandle<S>,
    ) -> Result<(), Error> {
        if self.is_empty() {
            return Ok(());
        }
        let mut buf = Vec::new();
        self.serialize_with_version(&mut buf)?;
        blob_object
            .write_attr(BLOB_METADATA_ATTRIBUTE_ID, &buf)
            .await
            .context("Failed to write blob metadata attribute.")
    }

    /// Returns the number of bytes `write_to` would store for this metadata: zero for empty
    /// metadata (which is never written), otherwise the versioned-serialization size, computed
    /// by serializing into a byte-counting sink rather than a real buffer.
    pub fn serialized_size(&self) -> Result<usize, Error> {
        if self.is_empty() {
            return Ok(0);
        }
        // A writer that discards the data and only tracks how many bytes were written.
        struct CountingWriter(usize);
        impl std::io::Write for CountingWriter {
            fn write(&mut self, buf: &[u8]) -> std::io::Result<usize> {
                self.0 += buf.len();
                Ok(buf.len())
            }
            fn flush(&mut self) -> std::io::Result<()> {
                Ok(())
            }
        }
        let mut writer = CountingWriter(0);
        self.serialize_with_version(&mut writer)?;
        Ok(writer.0)
    }

    /// Consumes the metadata and builds a `MerkleVerifier` rooted at `root`. When no leaf
    /// hashes were stored, the blob fits in a single merkle leaf, so the root itself is the
    /// only leaf hash (mirroring `BlobMetadataLeafHashCollector::complete`).
    pub fn into_merkle_verifier(self, root: Hash) -> Result<MerkleVerifier, Error> {
        let hashes = if self.merkle_leaves.is_empty() {
            Box::new([root])
        } else {
            self.merkle_leaves.into_iter().map(Into::into).collect::<Box<[Hash]>>()
        };
        Ok(MerkleVerifier::new(root, hashes)?)
    }

    /// Returns metadata for an uncompressed blob with no stored leaf hashes; this is the
    /// representation for blobs that have no metadata attribute on disk.
    pub fn empty() -> Self {
        Self { merkle_leaves: Vec::new(), format: BlobFormatV53::Uncompressed }
    }

    /// True iff this metadata carries no information (and thus would not be written).
    fn is_empty(&self) -> bool {
        *self == Self::empty()
    }
}
180
/// On-disk metadata for a blob (serialized-layout version 53): the blob's merkle tree leaf
/// hashes plus how its data is stored.
#[derive(Serialize, Deserialize, Debug, PartialEq, Eq, TypeFingerprint)]
pub struct BlobMetadataV53 {
    // Encoded as one flat byte string (see `merkle_serialization`) rather than a sequence of
    // 32-byte arrays. Empty when the blob fits in a single merkle leaf.
    #[serde(with = "merkle_serialization")]
    pub merkle_leaves: MerkleLeaves,
    // Whether/how the blob's data is compressed.
    pub format: BlobFormatV53,
}
187
impl Versioned for BlobMetadataV53 {
    /// No upper bound is enforced on the serialized size of blob metadata.
    fn max_serialized_size() -> Option<u64> {
        None
    }
}
194
195impl From<BlobMetadataUnversioned> for BlobMetadataV53 {
196 fn from(old: BlobMetadataUnversioned) -> Self {
197 if old.compressed_offsets.is_empty() {
198 Self { merkle_leaves: old.hashes, format: BlobFormat::Uncompressed }
199 } else {
200 Self {
201 merkle_leaves: old.hashes,
202 format: BlobFormat::ChunkedZstd {
203 uncompressed_size: old.uncompressed_size,
204 chunk_size: old.chunk_size,
205 compressed_offsets: old.compressed_offsets,
206 },
207 }
208 }
209 }
210}
211
/// How a blob's data is stored on disk (serialized-layout version 53).
#[derive(Serialize, Deserialize, Debug, PartialEq, Eq, TypeFingerprint)]
pub enum BlobFormatV53 {
    /// Raw, uncompressed data.
    Uncompressed,
    /// Zstd-compressed chunks; `compressed_offsets` holds the byte offset of each chunk.
    ChunkedZstd { uncompressed_size: u64, chunk_size: u64, compressed_offsets: Vec<u64> },
    /// Lz4-compressed chunks; same layout as `ChunkedZstd`.
    ChunkedLz4 { uncompressed_size: u64, chunk_size: u64, compressed_offsets: Vec<u64> },
}
218
219mod merkle_serialization {
222 use super::MerkleLeaves;
223 use serde::{Deserializer, Serializer};
224 use zerocopy::{FromBytes, IntoBytes};
225
226 pub fn serialize<S>(value: &MerkleLeaves, serializer: S) -> Result<S::Ok, S::Error>
227 where
228 S: Serializer,
229 {
230 let bytes = value.as_slice().as_bytes();
231 serializer.serialize_bytes(bytes)
232 }
233
234 pub fn deserialize<'de, D>(deserializer: D) -> Result<MerkleLeaves, D::Error>
235 where
236 D: Deserializer<'de>,
237 {
238 struct Visitor;
239 impl<'de> serde::de::Visitor<'de> for Visitor {
240 type Value = MerkleLeaves;
241
242 fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
243 formatter.write_str("a byte array with a length that it is a multiple of 32")
244 }
245
246 fn visit_bytes<E>(self, bytes: &[u8]) -> Result<Self::Value, E>
247 where
248 E: serde::de::Error,
249 {
250 if bytes.len() % 32 != 0 {
251 return Err(E::invalid_length(bytes.len(), &"a multiple of 32"));
252 }
253 let slice_of_arrays = <[[u8; 32]]>::ref_from_bytes(bytes)
254 .map_err(|_| E::custom("failed to cast bytes to &[[u8; 32]]"))?;
255 Ok(slice_of_arrays.to_vec())
256 }
257 }
258 deserializer.deserialize_bytes(Visitor)
259 }
260}
261
/// A `LeafHashCollector` that accumulates merkle tree leaf hashes into a `MerkleLeaves`
/// vector suitable for storing in `BlobMetadata`.
#[derive(Default)]
pub struct BlobMetadataLeafHashCollector(MerkleLeaves);
264
265impl BlobMetadataLeafHashCollector {
266 pub fn new() -> Self {
267 Self(Vec::new())
268 }
269}
270
271impl LeafHashCollector for BlobMetadataLeafHashCollector {
272 type Output = (Hash, MerkleLeaves);
273
274 fn add_leaf_hash(&mut self, hash: Hash) {
275 self.0.push(hash.into())
276 }
277
278 fn complete(mut self, root: Hash) -> Self::Output {
279 if self.0.len() == 1 {
281 debug_assert!(*root == self.0[0]);
282 self.0 = Vec::new();
283 }
284 (root, self.0)
285 }
286}
287
#[cfg(test)]
mod tests {
    use super::BlobMetadata;
    use crate::blob_metadata::{
        BlobFormat, BlobMetadataLeafHashCollector, BlobMetadataUnversioned,
    };
    use crate::filesystem::{FxFilesystem, OpenFxFilesystem};
    use crate::object_store::transaction::{LockKey, Options, lock_keys};
    use crate::object_store::{
        BLOB_MERKLE_ATTRIBUTE_ID, BLOB_METADATA_ATTRIBUTE_ID, DataObjectHandle, Directory,
        FSVERITY_MERKLE_ATTRIBUTE_ID, HandleOptions, ObjectStore,
    };
    use assert_matches::assert_matches;
    use fuchsia_merkle::MerkleRootBuilder;
    use storage_device::DeviceHolder;
    use storage_device::fake_device::FakeDevice;

    const TEST_DEVICE_BLOCK_SIZE: u32 = 512;
    const TEST_DEVICE_BLOCK_COUNT: u64 = 16 * 1024;
    const TEST_OBJECT_NAME: &str = "foo";

    /// Creates an empty filesystem backed by an in-memory fake block device.
    async fn test_filesystem() -> OpenFxFilesystem {
        let device =
            DeviceHolder::new(FakeDevice::new(TEST_DEVICE_BLOCK_COUNT, TEST_DEVICE_BLOCK_SIZE));
        FxFilesystem::new_empty(device).await.expect("new_empty failed")
    }

    /// Creates a test filesystem plus one empty object to attach attributes to.
    async fn test_filesystem_and_empty_object() -> (OpenFxFilesystem, DataObjectHandle<ObjectStore>)
    {
        let fs = test_filesystem().await;
        let store = fs.root_store();

        let mut transaction = fs
            .clone()
            .new_transaction(
                lock_keys![LockKey::object(
                    store.store_object_id(),
                    store.root_directory_object_id()
                )],
                Options::default(),
            )
            .await
            .expect("new_transaction failed");

        let object =
            ObjectStore::create_object(&store, &mut transaction, HandleOptions::default(), None)
                .await
                .expect("create_object failed");

        let root_directory =
            Directory::open(&store, store.root_directory_object_id()).await.expect("open failed");
        root_directory
            .add_child_file(&mut transaction, TEST_OBJECT_NAME, &object)
            .await
            .expect("add_child_file failed");

        transaction.commit().await.expect("commit failed");

        (fs, object)
    }

    /// Zstd-chunked metadata survives a write/read round trip.
    #[fuchsia::test(threads = 3)]
    async fn test_write_read_zstd() {
        let (fs, object) = test_filesystem_and_empty_object().await;

        let metadata = BlobMetadata {
            merkle_leaves: vec![[1; 32], [2; 32], [3; 32], [4; 32]],
            format: BlobFormat::ChunkedZstd {
                uncompressed_size: 128 * 1024,
                chunk_size: 32 * 1024,
                compressed_offsets: vec![0, 100, 200, 400],
            },
        };
        metadata.write_to(&object).await.expect("failed to write attribute");
        let read_metadata =
            BlobMetadata::read_from(&object).await.expect("failed to read attribute");
        assert_eq!(read_metadata, metadata);

        fs.close().await.expect("close failed");
    }

    /// Lz4-chunked metadata survives a write/read round trip.
    #[fuchsia::test(threads = 3)]
    async fn test_write_read_lz4() {
        let (fs, object) = test_filesystem_and_empty_object().await;

        let metadata = BlobMetadata {
            merkle_leaves: vec![[1; 32], [2; 32], [3; 32], [4; 32]],
            format: BlobFormat::ChunkedLz4 {
                uncompressed_size: 128 * 1024,
                chunk_size: 32 * 1024,
                compressed_offsets: vec![0, 100, 200, 400],
            },
        };
        metadata.write_to(&object).await.expect("failed to write attribute");
        let read_metadata =
            BlobMetadata::read_from(&object).await.expect("failed to read attribute");
        assert_eq!(read_metadata, metadata);

        fs.close().await.expect("close failed");
    }

    /// Writing empty metadata is a no-op: no attribute should end up on disk.
    #[fuchsia::test(threads = 3)]
    async fn test_empty_attribute_is_not_written() {
        let (fs, object) = test_filesystem_and_empty_object().await;

        BlobMetadata::empty().write_to(&object).await.expect("failed to write attribute");
        let result = object
            .read_attr(BLOB_METADATA_ATTRIBUTE_ID)
            .await
            .expect("reading the attribute failed");
        assert_eq!(result, None);

        fs.close().await.expect("close failed");
    }

    /// Garbage in the versioned metadata attribute must fail to deserialize.
    #[fuchsia::test(threads = 3)]
    async fn test_read_corrupt_attribute_fails() {
        let (fs, object) = test_filesystem_and_empty_object().await;

        object
            .write_attr(BLOB_METADATA_ATTRIBUTE_ID, b"garbage")
            .await
            .expect("failed to write attribute");
        BlobMetadata::read_from(&object).await.expect_err("reading the metadata should fail");

        fs.close().await.expect("close failed");
    }

    /// Legacy unversioned metadata is read back and migrated to the current layout.
    #[fuchsia::test(threads = 3)]
    async fn test_read_unversioned_attribute() {
        let (fs, object) = test_filesystem_and_empty_object().await;

        let unversioned_metadata = BlobMetadataUnversioned {
            hashes: vec![[1; 32], [2; 32]],
            chunk_size: 32 * 1024,
            compressed_offsets: vec![0],
            uncompressed_size: 15 * 1024,
        };
        let mut buf = Vec::new();
        bincode::serialize_into(&mut buf, &unversioned_metadata)
            .expect("failed to serialize metadata");
        object.write_attr(BLOB_MERKLE_ATTRIBUTE_ID, &buf).await.expect("failed to write attribute");
        let metadata = BlobMetadata::read_from(&object).await.expect("failed to read attribute");
        assert_eq!(metadata, BlobMetadata::from(unversioned_metadata));

        fs.close().await.expect("close failed");
    }

    /// Garbage in the legacy merkle attribute must fail to deserialize.
    #[fuchsia::test(threads = 3)]
    async fn test_read_corrupt_unversioned_attribute_fails() {
        let (fs, object) = test_filesystem_and_empty_object().await;

        object
            .write_attr(BLOB_MERKLE_ATTRIBUTE_ID, b"garbage")
            .await
            .expect("failed to write attribute");
        BlobMetadata::read_from(&object).await.expect_err("reading the metadata should fail");

        fs.close().await.expect("close failed");
    }

    /// An fs-verity merkle attribute on the object makes metadata reads fail as inconsistent,
    /// even when a valid blob metadata attribute is also present.
    #[fuchsia::test(threads = 3)]
    async fn test_fs_verity_hides_blob_metadata() {
        let (fs, object) = test_filesystem_and_empty_object().await;

        let metadata = BlobMetadata {
            merkle_leaves: vec![[1; 32], [2; 32]],
            format: BlobFormat::Uncompressed,
        };
        metadata.write_to(&object).await.expect("failed to write attribute");
        object
            .write_attr(FSVERITY_MERKLE_ATTRIBUTE_ID, b"fs-verify")
            .await
            .expect("failed to write fs-verity attribute");
        BlobMetadata::read_from(&object).await.expect_err("fs-verity should have been found");

        fs.close().await.expect("close failed");
    }

    /// `serialized_size` is 0 for empty metadata and matches the versioned encoding otherwise.
    #[fuchsia::test]
    async fn test_serialized_size() {
        assert_matches!(BlobMetadata::empty().serialized_size(), Ok(0));
        assert_matches!(
            BlobMetadata {
                merkle_leaves: vec![[54; 32], [55; 32]],
                format: BlobFormat::Uncompressed,
            }
            .serialized_size(),
            Ok(70)
        );
        assert_matches!(
            BlobMetadata {
                merkle_leaves: vec![[54; 32], [55; 32]],
                format: BlobFormat::ChunkedZstd {
                    uncompressed_size: 128 * 1024,
                    chunk_size: 32 * 1024,
                    compressed_offsets: vec![0, 100, 200, 400],
                },
            }
            .serialized_size(),
            Ok(85)
        );
    }

    /// A blob that fits in a single merkle leaf collects no leaf hashes.
    #[fuchsia::test]
    fn test_leaf_hash_collector_with_only_root() {
        let data = vec![3; 4096];
        let (_root, leaves) =
            MerkleRootBuilder::new(BlobMetadataLeafHashCollector::new()).complete(&data);
        assert!(leaves.is_empty());
    }

    /// A multi-leaf blob collects one hash per leaf.
    #[fuchsia::test]
    fn test_leaf_hash_collector_with_leaves() {
        let data = vec![3; 12 * 1024];
        let (_root, leaves) =
            MerkleRootBuilder::new(BlobMetadataLeafHashCollector::new()).complete(&data);
        assert_eq!(leaves.len(), 2);
    }

    /// Verification works when the root is the only leaf hash (empty `merkle_leaves`).
    #[fuchsia::test]
    fn test_into_merkle_verifier_with_only_root() {
        let data = vec![3; 4096];
        let (root, leaves) =
            MerkleRootBuilder::new(BlobMetadataLeafHashCollector::new()).complete(&data);
        let metadata = BlobMetadata { merkle_leaves: leaves, format: BlobFormat::Uncompressed };
        let verifier =
            metadata.into_merkle_verifier(root).expect("failed to create merkle verifier");
        verifier.verify(0, &data).expect("failed to verify data");
    }

    /// Verification works with stored leaf hashes.
    #[fuchsia::test]
    fn test_into_merkle_verifier_with_leaves() {
        let data = vec![3; 12 * 1024];
        let (root, leaves) =
            MerkleRootBuilder::new(BlobMetadataLeafHashCollector::new()).complete(&data);
        let metadata = BlobMetadata { merkle_leaves: leaves, format: BlobFormat::Uncompressed };
        let verifier =
            metadata.into_merkle_verifier(root).expect("failed to create merkle verifier");
        verifier.verify(0, &data).expect("failed to verify data");
    }

    /// Migration of the legacy layout: no compressed offsets means uncompressed, otherwise
    /// chunked zstd with the legacy fields carried over.
    #[fuchsia::test]
    fn test_convert_unversioned_to_versioned() {
        assert_eq!(
            BlobMetadata::from(BlobMetadataUnversioned {
                hashes: vec![[1; 32], [2; 32]],
                chunk_size: 0,
                compressed_offsets: vec![],
                uncompressed_size: 15 * 1024,
            }),
            BlobMetadata {
                merkle_leaves: vec![[1; 32], [2; 32]],
                format: BlobFormat::Uncompressed,
            }
        );

        assert_eq!(
            BlobMetadata::from(BlobMetadataUnversioned {
                hashes: vec![[1; 32], [2; 32], [3; 32], [4; 32]],
                chunk_size: 32 * 1024,
                compressed_offsets: vec![0, 100],
                uncompressed_size: 33 * 1024,
            }),
            BlobMetadata {
                merkle_leaves: vec![[1; 32], [2; 32], [3; 32], [4; 32]],
                format: BlobFormat::ChunkedZstd {
                    uncompressed_size: 33 * 1024,
                    chunk_size: 32 * 1024,
                    compressed_offsets: vec![0, 100]
                },
            }
        );
    }

    /// Round-trips `merkle_leaves` through bincode to exercise the custom
    /// `merkle_serialization` (de)serializer, including the empty-list case.
    /// (This test was previously an empty stub.)
    #[fuchsia::test]
    fn test_merkle_serialization() {
        for merkle_leaves in [vec![], vec![[7u8; 32]], vec![[1; 32], [2; 32], [3; 32]]] {
            let metadata = BlobMetadata { merkle_leaves, format: BlobFormat::Uncompressed };
            let mut buf = Vec::new();
            bincode::serialize_into(&mut buf, &metadata).expect("failed to serialize metadata");
            let deserialized: BlobMetadata =
                bincode::deserialize_from(&*buf).expect("failed to deserialize metadata");
            assert_eq!(deserialized, metadata);
        }
    }
}