1use crate::block_cache::BlockCache;
5use crate::checkpoint::*;
6use crate::crypto;
7use crate::dir::{DentryBlock, DirEntry};
8use crate::inode::{self, Inode};
9use crate::nat::{Nat, NatJournal, RawNatEntry, SummaryBlock};
10use crate::superblock::{
11 BLOCK_SIZE, BLOCKS_PER_SEGMENT, F2FS_MAGIC, SEGMENT_SIZE, SUPERBLOCK_OFFSET, SuperBlock,
12 f2fs_crc32,
13};
14use anyhow::{Error, anyhow, bail, ensure};
15use async_trait::async_trait;
16use std::collections::HashMap;
17use std::ops::Deref;
18use std::sync::Arc;
19use storage_device::Device;
20use storage_device::buffer::Buffer;
21use zerocopy::FromBytes;
22
/// Block address sentinel meaning "no block allocated" (a hole).
pub const NULL_ADDR: u32 = 0;
/// Block address sentinel meaning "allocated but not yet written".
pub const NEW_ADDR: u32 = 0xffffffff;
27
/// Low-level read interface over an f2fs image: raw block reads, node reads
/// resolved through the NAT, and (optional) fscrypt key material.
#[async_trait]
pub(super) trait Reader {
    /// Reads the block at `block_addr` from the underlying device.
    async fn read_raw_block(&self, block_addr: u32) -> Result<Buffer<'_>, Error>;

    /// Reads the node block for node id `nid` (address resolved via the NAT).
    async fn read_node(&self, nid: u32) -> Result<Buffer<'_>, Error>;

    /// Returns the main key registered under `identifier`, if any.
    /// Default implementation: no keys available.
    fn get_key(&self, _identifier: &[u8; 16]) -> Option<&[u8; 64]> {
        None
    }

    /// The filesystem UUID (used as input to per-file key derivation).
    fn fs_uuid(&self) -> &[u8; 16];

    /// Builds a per-file decryptor for `inode` when the inode carries an
    /// encryption context and its main key is registered; `None` otherwise.
    fn get_decryptor_for_inode(&self, inode: &Inode) -> Option<crypto::PerFileDecryptor> {
        if let Some(context) = inode.context {
            if let Some(main_key) = self.get_key(&context.main_key_identifier) {
                return Some(crypto::PerFileDecryptor::new(main_key, context, self.fs_uuid()));
            }
        }
        None
    }

    /// Looks up the raw NAT entry for node id `nid`.
    async fn get_nat_entry(&self, nid: u32) -> Result<RawNatEntry, Error>;
}
61
62pub struct F2fsReader {
63 device: Arc<dyn Device>,
64 superblock: SuperBlock, checkpoint: CheckpointPack, cp_start_block: u32, nat: Nat,
68
69 keys: HashMap<[u8; 16], [u8; 64]>,
71 cache: BlockCache,
72}
73
74impl Drop for F2fsReader {
75 fn drop(&mut self) {
76 self.keys.values_mut().for_each(|v| {
78 *v = [0u8; 64];
79 });
80 }
81}
82
83impl F2fsReader {
    /// Returns the superblock this reader was opened with.
    pub fn superblock(&self) -> &SuperBlock {
        &self.superblock
    }
87
    /// Returns the checkpoint pack this reader was initialized from.
    pub fn checkpoint(&self) -> &CheckpointPack {
        &self.checkpoint
    }
91
    /// Opens an f2fs filesystem on `device`.
    ///
    /// Tries the primary superblock first, falling back to the backup copy at
    /// `SUPERBLOCK_OFFSET * 2`. Checkpoint packs are then tried newest-first;
    /// the first one whose NAT journal parses successfully is used.
    pub async fn open_device(device: Arc<dyn Device>) -> Result<Self, Error> {
        let (superblock, checkpoints) =
            match Self::try_from_superblock(device.as_ref(), SUPERBLOCK_OFFSET).await {
                Ok(x) => x,
                // If both copies fail, surface the *primary* superblock's error.
                Err(e) => Self::try_from_superblock(device.as_ref(), SUPERBLOCK_OFFSET * 2)
                    .await
                    .map_err(|_| e)?,
            };

        let mut last_error = anyhow!("No checkpoints found");

        for (checkpoint, cp_start_block) in checkpoints {
            let mut this = Self {
                device: device.clone(),
                superblock,
                checkpoint,
                cp_start_block,
                // Placeholder NAT; replaced below once the journal is read.
                nat: Nat::new(0, vec![], HashMap::new()),
                keys: HashMap::with_capacity(16),
                cache: BlockCache::new(1024, BLOCK_SIZE),
            };

            match this.read_nat_journal().await {
                Ok(nat_journal) => {
                    this.nat = Nat::new(
                        this.superblock.nat_blkaddr,
                        this.checkpoint.nat_bitmap.clone(),
                        nat_journal,
                    );
                    return Ok(this);
                }
                Err(e) => {
                    // This checkpoint is unusable; remember the error and try
                    // the next (older) one.
                    let ver = this.checkpoint.header.checkpoint_ver;
                    log::warn!(
                        "Failed to initialize from checkpoint (Ver {} at {}): {}. Trying next.",
                        ver,
                        cp_start_block,
                        e
                    );
                    last_error = e;
                }
            }
        }

        Err(last_error)
    }
139
    /// Reads the superblock at `superblock_offset` and both checkpoint packs,
    /// returning the superblock plus all valid packs sorted newest-first
    /// (each paired with its start block address).
    async fn try_from_superblock(
        device: &dyn Device,
        superblock_offset: u64,
    ) -> Result<(SuperBlock, Vec<(CheckpointPack, u32)>), Error> {
        let superblock = SuperBlock::read_from_device(device, superblock_offset).await?;
        let checkpoint_addr = superblock.cp_blkaddr;
        // The two checkpoint packs occupy consecutive segments.
        let checkpoint_a_offset = BLOCK_SIZE as u64 * checkpoint_addr as u64;
        let checkpoint_b_offset = checkpoint_a_offset + SEGMENT_SIZE as u64;

        let mut checkpoints = Vec::new();

        if let Ok(cp) = CheckpointPack::read_from_device(device, checkpoint_a_offset).await {
            checkpoints.push((cp, checkpoint_addr));
        }
        if let Ok(cp) = CheckpointPack::read_from_device(device, checkpoint_b_offset).await {
            checkpoints.push((cp, checkpoint_addr + BLOCKS_PER_SEGMENT as u32));
        }

        if checkpoints.is_empty() {
            bail!("Failed to read any valid checkpoint");
        }

        // Highest checkpoint version (most recent) first.
        checkpoints.sort_by(|(a, _), (b, _)| {
            let va = a.header.checkpoint_ver;
            let vb = b.header.checkpoint_ver;
            vb.cmp(&va)
        });

        const MIN_METADATA_SEGMENT_COUNT: u32 = 8;

        let first_cp = &checkpoints[0].0;

        // Sanity-check segment counts using checked arithmetic so a corrupt
        // image cannot overflow its way past the bounds test below.
        let metadata_segment_count = superblock
            .segment_count_sit
            .checked_add(superblock.segment_count_nat)
            .and_then(|v| v.checked_add(first_cp.header.rsvd_segment_count))
            .and_then(|v| v.checked_add(superblock.segment_count_ssa))
            .and_then(|v| v.checked_add(superblock.segment_count_ckpt))
            .ok_or_else(|| anyhow::anyhow!("Segment counts overflow"))?;
        ensure!(
            metadata_segment_count <= superblock.segment_count
                && metadata_segment_count >= MIN_METADATA_SEGMENT_COUNT,
            "Bad segment counts in checkpoint"
        );
        Ok((superblock, checkpoints))
    }
191
    /// Block address where the selected checkpoint pack begins.
    pub fn checkpoint_start_addr(&self) -> u32 {
        self.cp_start_block
    }
196
    /// The NAT (node address table) helper built at open time.
    fn nat(&self) -> &Nat {
        &self.nat
    }
200 pub fn summary_block_addr(&self) -> u32 {
203 let mut offset = self.checkpoint.header.cp_pack_start_sum;
204 if self.checkpoint.header.ckpt_flags & CP_ORPHAN_PRESENT_FLAG != 0 {
205 offset += 1;
208 }
209 self.checkpoint_start_addr() + offset
210 }
211
212 async fn read_nat_journal(&mut self) -> Result<HashMap<u32, RawNatEntry>, Error> {
213 if self.checkpoint.header.ckpt_flags & CKPT_FLAG_COMPACT_SUMMARY != 0 {
214 let summary_addr = self.summary_block_addr();
217 let block = self.read_raw_block(summary_addr).await?;
218 let n_nats = u16::read_from_bytes(&block.as_slice()[..2]).unwrap();
219 let nat_journal = NatJournal::read_from_bytes(
220 &block.as_slice()[2..2 + std::mem::size_of::<NatJournal>()],
221 )
222 .unwrap();
223 ensure!(
224 (n_nats as usize) <= nat_journal.entries.len(),
225 "n_nats {} larger than block size {}",
226 n_nats,
227 nat_journal.entries.len()
228 );
229 Ok(HashMap::from_iter(
230 nat_journal.entries[..n_nats as usize].into_iter().map(|e| (e.ino, e.entry)),
231 ))
232 } else {
233 let summary_addr = self.summary_block_addr();
236 let block = self.read_raw_block(summary_addr).await?;
237
238 let summary = SummaryBlock::read_from_bytes(block.as_slice())
239 .map_err(|_| anyhow!("Failed to parse SummaryBlock"))?;
240 ensure!(summary.footer.entry_type == 0u8, "sum_type != 0 in summary footer");
241 #[cfg(not(fuzz))]
242 {
243 let actual_checksum = f2fs_crc32(F2FS_MAGIC, &block.as_slice()[..BLOCK_SIZE - 4]);
244 let expected_checksum = summary.footer.check_sum;
245 if actual_checksum != expected_checksum {
246 log::warn!(
248 "Summary block checksum mismatch (actual: 0x{:x}, expected: 0x{:x}). \
249 This is normal for checkpoints with CP_CRC_RECOVERY_FLAG.",
250 actual_checksum,
251 expected_checksum
252 );
253 }
254 }
255 let n_nats = summary.n_nats;
256 ensure!(
257 (n_nats as usize) <= summary.nat_journal.entries.len(),
258 "n_nats {} larger than block size {}",
259 n_nats,
260 summary.nat_journal.entries.len()
261 );
262 let mut out = HashMap::new();
263 for i in 0..n_nats as usize {
264 out.insert(
265 summary.nat_journal.entries[i].ino,
266 summary.nat_journal.entries[i].entry,
267 );
268 }
269 Ok(out)
270 }
271 }
272
    /// Inode number of the filesystem root directory (from the superblock).
    pub fn root_ino(&self) -> u32 {
        self.superblock.root_ino
    }
276
    /// Upper bound on inode numbers, derived from the NAT bitmap size
    /// (one bit per node id).
    pub fn max_ino(&self) -> u32 {
        (self.checkpoint.nat_bitmap.len() * 8) as u32
    }
282
283 pub fn add_key(&mut self, main_key: &[u8; 64]) -> [u8; 16] {
286 let identifier = fscrypt::main_key_to_identifier(main_key);
287 println!("Adding key with identifier {}", hex::encode(identifier));
288 self.keys.insert(identifier.clone(), main_key.clone());
289 identifier
290 }
291
    /// Lists the entries of directory inode `ino`.
    ///
    /// Handles both inline directories (entries stored inside the inode) and
    /// regular ones (entries in dentry blocks), decrypting names when a
    /// decryptor is available for the directory.
    pub async fn readdir(&self, ino: u32) -> Result<Vec<DirEntry>, Error> {
        let inode = Inode::try_load(self, ino).await?;
        let decryptor = self.get_decryptor_for_inode(&inode);
        let mode = inode.header.mode;
        let advise_flags = inode.header.advise_flags;
        let flags = inode.header.flags;
        ensure!(mode.contains(inode::Mode::Directory), "not a directory");
        if let Some(entries) = inode.get_inline_dir_entries(
            advise_flags.contains(inode::AdviseFlags::Encrypted),
            flags.contains(inode::Flags::Casefold),
            &decryptor,
        )? {
            Ok(entries)
        } else {
            let mut entries = Vec::new();

            // Walk every data extent, reading each physical block as a
            // dentry block and accumulating its entries.
            for mut extent in inode.data_blocks() {
                for _ in 0..extent.length {
                    let dentry_block = DentryBlock::read_from_bytes(
                        self.read_raw_block(extent.physical_block_num).await?.as_slice(),
                    )
                    .unwrap();
                    entries.append(&mut dentry_block.get_entries(
                        ino,
                        advise_flags.contains(inode::AdviseFlags::Encrypted),
                        flags.contains(inode::Flags::Casefold),
                        &decryptor,
                    )?);
                    // Extents are contiguous on disk; step to the next block.
                    extent.physical_block_num += 1;
                }
            }
            Ok(entries)
        }
    }
331
    /// Loads the inode with number `ino`.
    pub async fn read_inode(&self, ino: u32) -> Result<Box<Inode>, Error> {
        Inode::try_load(self, ino).await
    }
336
    /// Returns the target of a symlink inode.
    ///
    /// Encrypted symlinks store a u16 length prefix followed by the encrypted
    /// target. Without the key installed, a proxy filename is returned in
    /// place of the plaintext target.
    pub fn read_symlink(&self, inode: &Inode) -> Result<Box<[u8]>, Error> {
        if let Some(inline_data) = inode.inline_data.as_deref() {
            let mut filename = inline_data.to_vec();
            if inode.header.advise_flags.contains(inode::AdviseFlags::Encrypted) {
                ensure!(filename.len() >= 2, "invalid encrypted symlink");
                let symlink_len = u16::read_from_bytes(&filename[..2]).unwrap();
                filename.drain(..2);
                filename.truncate(symlink_len as usize);
                // truncate() never grows, so this rejects a declared length
                // larger than the available payload.
                ensure!(symlink_len == filename.len() as u16, "invalid encrypted symlink");
                if let Some(decryptor) = self.get_decryptor_for_inode(inode) {
                    decryptor.decrypt_filename_data(inode.footer.ino, &mut filename);
                } else {
                    let proxy_filename: String =
                        fscrypt::proxy_filename::ProxyFilename::new_with_hash_code(0, &filename)
                            .into();
                    filename = proxy_filename.as_bytes().to_vec();
                }
                // Strip trailing NUL padding.
                while let Some(0) = filename.last() {
                    filename.pop();
                }
            }
            Ok(filename.into_boxed_slice())
        } else {
            bail!("Not a valid symlink");
        }
    }
368
369 pub async fn read_data(
372 &self,
373 inode: &Inode,
374 block_num: u32,
375 ) -> Result<Option<Buffer<'_>>, Error> {
376 let inline_flags = inode.header.inline_flags;
377 ensure!(
378 !inline_flags.contains(crate::InlineFlags::Data),
379 "Can't use read_data() on inline file."
380 );
381 let block_addr = inode.data_block_addr(block_num);
382 if block_addr == NULL_ADDR || block_addr == NEW_ADDR {
383 return Ok(None);
385 }
386 let mut buffer = self.read_raw_block(block_addr).await?;
387 if let Some(decryptor) = self.get_decryptor_for_inode(inode) {
388 decryptor.decrypt_data(inode.footer.ino, block_num, buffer.as_mut().as_mut_slice());
389 }
390 Ok(Some(buffer))
391 }
392}
393
394#[async_trait]
395impl Reader for F2fsReader {
    /// Reads the block at `block_addr`, serving from the block cache when
    /// possible and reading ahead up to 16 blocks on a miss.
    async fn read_raw_block(&self, block_addr: u32) -> Result<Buffer<'_>, Error> {
        if let Some(block) = self.cache.get_buffer(block_addr, self.device.deref()).await {
            return Ok(block);
        }

        const READAHEAD: u64 = 16;
        // Clamp the readahead window to the device size, but always attempt
        // at least one block (an out-of-range address then fails in the read
        // below rather than silently reading nothing).
        let end = std::cmp::min(block_addr as u64 + READAHEAD, self.device.block_count());
        let count = end.saturating_sub(block_addr as u64).max(1) as usize;

        let mut buffer = self.device.allocate_buffer(count * BLOCK_SIZE).await;
        self.device
            .read(block_addr as u64 * BLOCK_SIZE as u64, buffer.as_mut())
            .await
            .map_err(|_| anyhow!("device read failed"))?;

        // Populate the cache with every block we read ahead.
        for i in 0..count {
            let slice = &buffer.as_slice()[i * BLOCK_SIZE..(i + 1) * BLOCK_SIZE];
            self.cache.insert(block_addr + i as u32, slice.to_vec());
        }
        // The block was just inserted, so this lookup is expected to succeed.
        Ok(self.cache.get_buffer(block_addr, self.device.deref()).await.unwrap())
    }
418
    /// Resolves `nid` through the NAT, then reads the node's block.
    async fn read_node(&self, nid: u32) -> Result<Buffer<'_>, Error> {
        let nat_entry = self.get_nat_entry(nid).await?;
        self.read_raw_block(nat_entry.block_addr).await
    }
423
    /// Returns the registered main key for `identifier`, if one was added.
    fn get_key(&self, identifier: &[u8; 16]) -> Option<&[u8; 64]> {
        self.keys.get(identifier)
    }
427
    /// The filesystem UUID from the superblock.
    fn fs_uuid(&self) -> &[u8; 16] {
        &self.superblock.uuid
    }
431
    /// Looks up the NAT entry for `nid`, preferring the in-memory journal
    /// (which holds the newest entries) over the on-disk NAT block.
    async fn get_nat_entry(&self, nid: u32) -> Result<RawNatEntry, Error> {
        if let Some(entry) = self.nat().nat_journal.get(&nid) {
            return Ok(*entry);
        }
        let nat_block_addr = self.nat().get_nat_block_for_entry(nid)?;
        let offset = self.nat().get_nat_block_offset_for_entry(nid);
        let block = self.read_raw_block(nat_block_addr).await?;
        Ok(RawNatEntry::read_from_bytes(
            &block.as_slice()[offset..offset + std::mem::size_of::<RawNatEntry>()],
        )
        .unwrap())
    }
444}
445
446#[cfg(test)]
447mod test {
448 use super::*;
449 use crate::dir::FileType;
450 use crate::xattr;
451 use std::collections::HashSet;
452 use std::path::PathBuf;
453 use std::sync::Arc;
454
455 use storage_device::fake_device::FakeDevice;
456
457 fn open_test_image(path: &str) -> FakeDevice {
458 let path = std::path::PathBuf::from(path);
459 println!("path is {path:?}");
460 FakeDevice::from_image(
461 zstd::Decoder::new(std::fs::File::open(&path).expect("open image"))
462 .expect("decompress image"),
463 BLOCK_SIZE as u32,
464 )
465 .expect("open image")
466 }
467
    /// Smoke test: the reference image opens and its superblock fields match
    /// the values it was created with.
    #[fuchsia::test]
    async fn test_open_fs() {
        let device = open_test_image("/pkg/testdata/f2fs.img.zst");

        let f2fs = F2fsReader::open_device(Arc::new(device)).await.expect("open ok");
        assert_eq!(f2fs.root_ino(), 3);
        let superblock = &f2fs.superblock;
        let major_ver = superblock.major_ver;
        let minor_ver = superblock.minor_ver;
        assert_eq!(major_ver, 1);
        assert_eq!(minor_ver, 16);
        assert_eq!(superblock.get_total_size(), 256 << 20);
        assert_eq!(superblock.get_volume_name().expect("get volume name"), "testimage");
    }
483
484 async fn resolve_inode_path(f2fs: &F2fsReader, path: &str) -> Result<u32, Error> {
486 let path = PathBuf::from(path.strip_prefix("/").unwrap());
487 let mut ino = f2fs.root_ino();
488 for filename in &path {
489 let entries = f2fs.readdir(ino).await?;
490 if let Some(entry) = entries.iter().filter(|e| *e.filename == *filename).next() {
491 ino = entry.ino;
492 } else {
493 bail!("Not found.");
494 }
495 }
496 Ok(ino)
497 }
498
    /// Exercises directory listing and file reads: inline files, regular
    /// files, symlinks, hardlinks, ownership, large directories, and sparse
    /// files with holes.
    #[fuchsia::test]
    async fn test_basic_dirs() {
        let device = open_test_image("/pkg/testdata/f2fs.img.zst");

        let f2fs = F2fsReader::open_device(Arc::new(device)).await.expect("open ok");
        let root_ino = f2fs.root_ino();
        let root_entries = f2fs.readdir(root_ino).await.expect("readdir");
        assert_eq!(root_entries.len(), 7);
        assert_eq!(root_entries[0].filename, "a");
        assert_eq!(root_entries[0].file_type, FileType::Directory);
        assert_eq!(root_entries[1].filename, "large_dir");
        assert_eq!(root_entries[2].filename, "large_dir2");
        assert_eq!(root_entries[3].filename, "sparse.dat");
        assert_eq!(root_entries[4].filename, "verity");
        assert_eq!(root_entries[5].filename, "fscrypt");
        assert_eq!(root_entries[6].filename, "large_zero");

        // Small file whose contents live inline in the inode.
        let inlined_file_ino =
            resolve_inode_path(&f2fs, "/a/b/c/inlined").await.expect("resolve inlined");
        let inode = Inode::try_load(&f2fs, inlined_file_ino).await.expect("load inode");
        let block_size = inode.header.block_size;
        let size = inode.header.size;
        assert_eq!(block_size, 1);
        assert_eq!(size, 12);
        assert_eq!(inode.inline_data.unwrap().as_ref(), "inline_data\n".as_bytes());

        // Regular file: 8 zero blocks plus a short tail in a ninth block.
        const REG_FILE_SIZE: u64 = 8 * BLOCK_SIZE as u64 + 8;
        const REG_FILE_BLOCKS: u64 = 9 + 1;
        let regular_file_ino =
            resolve_inode_path(&f2fs, "/a/b/c/regular").await.expect("resolve regular");
        let inode = Inode::try_load(&f2fs, regular_file_ino).await.expect("load inode");
        let block_size = inode.header.block_size;
        let size = inode.header.size;
        assert_eq!(block_size, REG_FILE_BLOCKS);
        assert_eq!(size, REG_FILE_SIZE);
        assert!(inode.inline_data.is_none());
        for i in 0..8 {
            assert_eq!(
                f2fs.read_data(&inode, i).await.expect("read data").unwrap().as_slice(),
                &[0u8; BLOCK_SIZE]
            );
        }
        assert_eq!(
            &f2fs.read_data(&inode, 8).await.expect("read data").unwrap().as_slice()[..9],
            b"01234567\0"
        );

        let symlink_ino =
            resolve_inode_path(&f2fs, "/a/b/c/symlink").await.expect("resolve symlink");
        let inode = Inode::try_load(&f2fs, symlink_ino).await.expect("load inode");
        assert_eq!(f2fs.read_symlink(&inode).expect("read_symlink").as_ref(), b"regular");

        // The hardlink shares the regular file's size and block count.
        let hardlink_ino =
            resolve_inode_path(&f2fs, "/a/b/c/hardlink").await.expect("resolve hardlink");
        let inode = Inode::try_load(&f2fs, hardlink_ino).await.expect("load inode");
        let block_size = inode.header.block_size;
        let size = inode.header.size;
        assert_eq!(block_size, REG_FILE_BLOCKS);
        assert_eq!(size, REG_FILE_SIZE);

        let chowned_ino =
            resolve_inode_path(&f2fs, "/a/b/c/chowned").await.expect("resolve chowned");
        let inode = Inode::try_load(&f2fs, chowned_ino).await.expect("load inode");
        let uid = inode.header.uid;
        let gid = inode.header.gid;
        assert_eq!(uid, 999);
        assert_eq!(gid, 999);

        let large_dir = resolve_inode_path(&f2fs, "/large_dir").await.expect("resolve large_dir");
        assert_eq!(f2fs.readdir(large_dir).await.expect("readdir").len(), 2001);

        let large_dir2 = resolve_inode_path(&f2fs, "/large_dir2").await.expect("resolve large_dir");
        assert_eq!(f2fs.readdir(large_dir2).await.expect("readdir").len(), 1);

        // Sparse file: a handful of written extents separated by large holes.
        let sparse_dat =
            resolve_inode_path(&f2fs, "/sparse.dat").await.expect("resolve sparse.dat");
        let inode = Inode::try_load(&f2fs, sparse_dat).await.expect("load inode");
        let data_blocks: Vec<_> = inode.data_blocks().into_iter().collect();
        assert_eq!(data_blocks.len(), 6);
        assert_eq!(data_blocks[0].logical_block_num, 0);
        assert_eq!(data_blocks[0].length, 1);
        let block =
            f2fs.read_raw_block(data_blocks[0].physical_block_num).await.expect("read sparse");
        assert_eq!(&block.as_slice()[..3], b"foo");
        assert_eq!(data_blocks[1].logical_block_num, 923);
        assert_eq!(data_blocks[1].length, 1);
        assert_eq!(data_blocks[2].logical_block_num, 1941);
        assert_eq!(data_blocks[2].length, 1);
        assert_eq!(data_blocks[3].logical_block_num, 2959);
        assert_eq!(data_blocks[3].length, 1);
        assert_eq!(data_blocks[4].logical_block_num, 1039283);
        assert_eq!(data_blocks[4].length, 1);
        assert_eq!(data_blocks[5].logical_block_num, 104671683);
        assert_eq!(data_blocks[5].length, 2);
        let block =
            f2fs.read_raw_block(data_blocks[5].physical_block_num).await.expect("read sparse");
        assert_eq!(block.as_slice(), &[0; BLOCK_SIZE]);
        assert_eq!(
            &f2fs.read_data(&inode, 104671684).await.expect("read data block").unwrap().as_slice()
                [..3],
            b"bar"
        );
        // Reading inside a hole yields None rather than zeroes.
        assert!(f2fs.read_data(&inode, 104671684 - 10).await.expect("read data block").is_none());
    }
607
    /// Verifies extended attributes parse correctly, including values of
    /// every padding length from 1 to 5 bytes.
    #[fuchsia::test]
    async fn test_xattr() {
        let device = open_test_image("/pkg/testdata/f2fs.img.zst");

        let f2fs = F2fsReader::open_device(Arc::new(device)).await.expect("open ok");
        let sparse_dat =
            resolve_inode_path(&f2fs, "/sparse.dat").await.expect("resolve sparse.dat");
        let inode = Inode::try_load(&f2fs, sparse_dat).await.expect("load inode");
        assert_eq!(
            inode.xattr,
            vec![
                xattr::XattrEntry {
                    index: xattr::Index::User,
                    name: Box::new(b"a".to_owned()),
                    value: Box::new(b"value".to_owned())
                },
                xattr::XattrEntry {
                    index: xattr::Index::User,
                    name: Box::new(b"c".to_owned()),
                    value: Box::new(b"value".to_owned())
                },
                xattr::XattrEntry {
                    index: xattr::Index::User,
                    name: Box::new(b"padding_test_1".to_owned()),
                    value: Box::new(b"v".to_owned())
                },
                xattr::XattrEntry {
                    index: xattr::Index::User,
                    name: Box::new(b"padding_test_2".to_owned()),
                    value: Box::new(b"va".to_owned())
                },
                xattr::XattrEntry {
                    index: xattr::Index::User,
                    name: Box::new(b"padding_test_3".to_owned()),
                    value: Box::new(b"val".to_owned())
                },
                xattr::XattrEntry {
                    index: xattr::Index::User,
                    name: Box::new(b"padding_test_4".to_owned()),
                    value: Box::new(b"valu".to_owned())
                },
                xattr::XattrEntry {
                    index: xattr::Index::User,
                    name: Box::new(b"padding_test_5".to_owned()),
                    value: Box::new(b"value".to_owned())
                },
            ]
        );
    }
657
    /// Confirms the Verity advise flag is set on fsverity-enabled files and
    /// clear on an ordinary file.
    #[fuchsia::test]
    async fn test_fsverity() {
        let device = open_test_image("/pkg/testdata/f2fs.img.zst");
        let mut f2fs = F2fsReader::open_device(Arc::new(device)).await.expect("open ok");
        // The key is needed to resolve the encrypted path below.
        f2fs.add_key(&[0u8; 64]);
        let verity_files = vec![
            "/verity/inlined",
            "/verity/regular",
            "/verity/merkle_layers.dat",
            "/fscrypt/a/b/regular",
        ];
        for file_path in verity_files {
            let file = resolve_inode_path(&f2fs, file_path).await.expect("resolve file");
            let inode = Inode::try_load(&f2fs, file).await.expect("load inode");
            assert!(inode.header.advise_flags.contains(inode::AdviseFlags::Verity));
        }
        // Negative case: a non-verity file must not carry the flag.
        let file = resolve_inode_path(&f2fs, "/a/b/c/regular").await.expect("resolve file");
        let inode = Inode::try_load(&f2fs, file).await.expect("load inode");
        assert!(!inode.header.advise_flags.contains(inode::AdviseFlags::Verity));
    }
680
    /// Exercises file-based encryption (fscrypt): without the key, names and
    /// symlink targets appear as proxy filenames; after `add_key`, plaintext
    /// paths resolve and data/symlinks decrypt.
    #[fuchsia::test]
    async fn test_fbe() {
        // Proxy (no-key) names of directories a/b and the symlink inside.
        let str_a = "2ll82QAAAADywluz1Ule7OVNBxUfa5Mw";
        let str_b = "sttckQAAAADLBOCVVgjrZ-CXNkj5E6Cr";
        let str_symlink = "zHAtQgAAAACRNPQYvCKuQo5F8rQUORg3";
        let bytes_symlink_content = b"AAAAAAAAAADUsYZ_qNiiouF7e40xm65S";

        // Expected proxy names of /fscrypt's entries when no key is present.
        let mut expected : HashSet<_> = [ "2ll82QAAAADywluz1Ule7OVNBxUfa5Mw",
            "65OSUQAAAADqOiZJcQ1El2dpVdYMy84l",
            "7vcnbgAAAAAOWdQfi4wK46uRGQBD0YSy",
            "9Gsv9QAAAADjTeJ_9WdCxZMVTiSWhsWR",
            "FAqGXAAAAAD1jOLXaZN-o8X9PoS67GI7",
            "Rq5qZAAAAAA3y2lvAqesYDnVJWMklWnj",
            "S93sdgAAAABo-YmXNPKtv4wxQCcUslTu",
            "VP8QBwAAAAATw6Ozex0N2gMYrnDsB2aH",
            "xUNjwgAAAADB0pEx5ovwx-AS02L0d1j7VMBRXzM4YnBri2pbasOqbFLhtegXr9kDGNcYd_hyk2mOkQIqu8hk7eARlFl-bq1yLhikhIT9HVC3FMrI7vQ-ewncEjXLDP3KK6RtH3r34S89AlzJZ4DVfXrr_Q5N5mANBbGTzeO70aJHL0Ms-MgkKwjHcbIxXLwcjE2B-mssLAvXam58pSD-aazxS_J2hrxOHGoUYiVJ-rXHozmKxBdWAO6OUW65",
        ].into_iter().collect();

        let device = open_test_image("/pkg/testdata/f2fs.img.zst");

        let mut f2fs = F2fsReader::open_device(Arc::new(device)).await.expect("open ok");

        // Without the key, plaintext paths must not resolve.
        resolve_inode_path(&f2fs, "/fscrypt/a/b/regular")
            .await
            .expect_err("resolve fscrypt regular");
        let fscrypt_dir_ino =
            resolve_inode_path(&f2fs, "/fscrypt").await.expect("resolve encrypted dir");
        let entries = f2fs.readdir(fscrypt_dir_ino).await.expect("readdir");
        println!("entries {entries:?}");

        for entry in entries {
            assert!(expected.remove(entry.filename.as_str()), "unexpected entry {entry:?}");
        }
        assert!(expected.is_empty());

        // Proxy names are themselves resolvable paths.
        resolve_inode_path(&f2fs, &format!("/fscrypt/{str_a}"))
            .await
            .expect("resolve encrypted dir");
        let enc_symlink_ino =
            resolve_inode_path(&f2fs, &format!("/fscrypt/{str_a}/{str_b}/{str_symlink}"))
                .await
                .expect("resolve encrypted symlink");
        let symlink_inode =
            Inode::try_load(&f2fs, enc_symlink_ino).await.expect("load symlink inode");
        assert_eq!(
            &*f2fs.read_symlink(&symlink_inode).expect("read_symlink"),
            bytes_symlink_content
        );

        // After installing the key, plaintext paths and contents work.
        f2fs.add_key(&[0u8; 64]);
        resolve_inode_path(&f2fs, "/fscrypt/a/b/regular").await.expect("resolve fscrypt regular");
        let inlined_ino = resolve_inode_path(&f2fs, "/fscrypt/a/b/inlined")
            .await
            .expect("resolve fscrypt inlined");
        let short_file = Inode::try_load(&f2fs, inlined_ino).await.expect("load symlink inode");
        assert!(
            !short_file.header.inline_flags.contains(inode::InlineFlags::Data),
            "encrypted files shouldn't be inlined"
        );
        let short_data =
            f2fs.read_data(&short_file, 0).await.expect("read_data").expect("non-empty page");
        assert_eq!(
            &short_data.as_slice()[..short_file.header.size as usize],
            b"test45678abcdef_12345678"
        );

        // The plaintext symlink path resolves to the same inode seen earlier
        // via its proxy name, and its target now decrypts.
        let symlink_ino = resolve_inode_path(&f2fs, "/fscrypt/a/b/symlink")
            .await
            .expect("resolve fscrypt symlink");
        assert_eq!(symlink_ino, enc_symlink_ino);

        let symlink_inode = Inode::try_load(&f2fs, symlink_ino).await.expect("load symlink inode");
        let symlink = f2fs.read_symlink(&symlink_inode).expect("read_symlink");
        assert_eq!(*symlink, *b"inlined");
    }
769
770 #[fuchsia::test]
771 async fn test_summary_block_addr() {
772 let device = open_test_image("/pkg/testdata/f2fs.img.zst");
773 let mut f2fs = F2fsReader::open_device(Arc::new(device)).await.expect("open ok");
774
775 f2fs.checkpoint.header.ckpt_flags = 0; f2fs.checkpoint.header.cp_pack_start_sum = 100;
778 let base = f2fs.checkpoint_start_addr();
779 assert_eq!(f2fs.summary_block_addr(), base + 100);
780
781 f2fs.checkpoint.header.ckpt_flags = CP_ORPHAN_PRESENT_FLAG;
783 assert_eq!(f2fs.summary_block_addr(), base + 100 + 1);
784
785 f2fs.checkpoint.header.ckpt_flags = CP_ORPHAN_PRESENT_FLAG | CKPT_FLAG_COMPACT_SUMMARY;
787 assert_eq!(f2fs.summary_block_addr(), base + 100 + 1);
788 }
789}