1use crate::crypto;
5use crate::dir::InlineDentry;
6use crate::reader::{NEW_ADDR, NULL_ADDR, Reader};
7use crate::superblock::BLOCK_SIZE;
8use crate::xattr::{XattrEntry, decode_xattr};
9use anyhow::{Error, anyhow, ensure};
10use bitflags::bitflags;
11use std::collections::HashMap;
12use std::fmt::Debug;
13use storage_device::buffer::Buffer;
14use zerocopy::{FromBytes, Immutable, IntoBytes, KnownLayout, Ref, Unaligned};
15
16const NAME_MAX: usize = 255;
17const INODE_BLOCK_MAX_ADDR: usize = 923;
19const ADDR_BLOCK_NUM_ADDR: u32 = 1018;
21
22#[repr(C, packed)]
25#[derive(Copy, Clone, Debug, Immutable, KnownLayout, FromBytes, IntoBytes, Unaligned)]
26pub struct Extent {
27 file_offset: u32,
28 block_address: u32,
29 len: u32,
30}
31
32#[derive(Copy, Clone, Debug, Immutable, FromBytes, IntoBytes)]
33pub struct Mode(u16);
34bitflags! {
35 impl Mode: u16 {
36 const RegularFile = 0o100000;
37 const Directory = 0o040000;
38 }
39}
40
41#[derive(Copy, Clone, Debug, Immutable, FromBytes, IntoBytes)]
42pub struct AdviseFlags(u8);
43bitflags! {
44 impl AdviseFlags: u8 {
45 const Encrypted = 0x04;
46 const EncryptedName = 0x08;
47 const Verity = 0x40;
48 }
49}
50
51#[derive(Copy, Clone, Debug, Immutable, FromBytes, IntoBytes)]
52pub struct InlineFlags(u8);
53bitflags! {
54 impl InlineFlags: u8 {
55 const Xattr = 0b00000001;
56 const Data = 0b00000010;
57 const Dentry = 0b00000100;
58 const ExtraAttr = 0b00100000;
59 }
60}
61
62#[derive(Copy, Clone, Debug, Immutable, FromBytes, IntoBytes)]
63pub struct Flags(u32);
64bitflags! {
65 impl Flags: u32 {
66 const Casefold = 0x40000000;
67 }
68}
69
70#[repr(C, packed)]
71#[derive(Copy, Clone, Debug, Immutable, KnownLayout, FromBytes, IntoBytes, Unaligned)]
72pub struct InodeHeader {
73 pub mode: Mode,
74 pub advise_flags: AdviseFlags,
75 pub inline_flags: InlineFlags,
76 pub uid: u32,
77 pub gid: u32,
78 pub links: u32,
79 pub size: u64,
80 pub block_size: u64,
81 pub atime: i64,
82 pub ctime: i64,
83 pub mtime: i64,
84 pub atime_nanos: u32,
85 pub ctime_nanos: u32,
86 pub mtime_nanos: u32,
87 pub generation: u32,
88 pub dir_depth: u32,
89 pub xattr_nid: u32,
90 pub flags: Flags,
91 pub parent_inode: u32,
92 pub name_len: u32,
93 pub name: [u8; NAME_MAX],
94 pub dir_level: u8,
95
96 ext: Extent, }
98
99#[repr(C, packed)]
101#[derive(Copy, Clone, Debug, Immutable, KnownLayout, FromBytes, IntoBytes, Unaligned)]
102pub struct InodeExtraAttr {
103 pub extra_size: u16,
104 pub inline_xattr_size: u16,
105 pub project_id: u32,
106 pub inode_checksum: u32,
107 pub creation_time: u64,
108 pub creation_time_nanos: u32,
109 pub compressed_blocks: u64,
110 pub compression_algorithm: u8,
111 pub log_cluster_size: u8,
112 pub compression_flags: u16,
113}
114
115#[repr(C, packed)]
116#[derive(Copy, Clone, Debug, Immutable, KnownLayout, FromBytes, IntoBytes, Unaligned)]
117pub struct InodeFooter {
118 pub nid: u32,
119 pub ino: u32,
120 pub flag: u32,
121 pub cp_ver: u64,
122 pub next_blkaddr: u32,
123}
124
125pub struct Inode {
170 pub header: InodeHeader,
171 pub extra: Option<InodeExtraAttr>,
172 pub inline_data: Option<Box<[u8]>>,
173 pub(super) inline_dentry: Option<InlineDentry>,
174 pub(super) i_addrs: Vec<u32>,
175 nids: [u32; 5],
176 pub footer: InodeFooter,
177
178 nid_pages: HashMap<u32, Box<RawAddrBlock>>,
180 pub xattr: Vec<XattrEntry>,
181
182 pub context: Option<fscrypt::Context>,
184
185 pub block_addrs: Vec<u32>,
188}
189
190#[repr(C, packed)]
194#[derive(Copy, Clone, Debug, Immutable, KnownLayout, FromBytes, IntoBytes, Unaligned)]
195pub struct RawAddrBlock {
196 pub addrs: [u32; ADDR_BLOCK_NUM_ADDR as usize],
197 _reserved:
198 [u8; BLOCK_SIZE - std::mem::size_of::<InodeFooter>() - 4 * ADDR_BLOCK_NUM_ADDR as usize],
199 pub footer: InodeFooter,
200}
201
202impl TryFrom<Buffer<'_>> for Box<RawAddrBlock> {
203 type Error = Error;
204 fn try_from(block: Buffer<'_>) -> Result<Self, Self::Error> {
205 Ok(Box::new(
206 RawAddrBlock::read_from_bytes(block.as_slice())
207 .map_err(|_| anyhow!("RawAddrBlock read failed"))?,
208 ))
209 }
210}
211
212impl Debug for Inode {
213 fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
214 let mut out = f.debug_struct("Inode");
215 out.field("header", &self.header);
216 if let Some(extra) = &self.extra {
217 out.field("extra", &extra);
218 }
219 if let Some(inline_dentry) = &self.inline_dentry {
220 out.field("inline_dentry", &inline_dentry);
221 }
222 out.field("i_addrs", &self.i_addrs).field("footer", &self.footer);
223 out.field("xattr", &self.xattr);
224 out.finish()
225 }
226}
227
228impl Inode {
229 pub(super) async fn try_load(f2fs: &impl Reader, ino: u32) -> Result<Box<Inode>, Error> {
231 let mut block_addrs = vec![];
232 let mut raw_xattr = vec![];
233 let mut this = {
234 let block = f2fs.read_node(ino).await?;
235 block_addrs.push(f2fs.get_nat_entry(ino).await?.block_addr);
236 let (header, rest): (Ref<_, InodeHeader>, _) =
244 Ref::from_prefix(block.as_slice()).unwrap();
245 let (rest, footer): (_, Ref<_, InodeFooter>) = Ref::from_suffix(rest).unwrap();
246 ensure!(footer.ino == ino, "Footer ino doesn't match.");
247
248 let mut nids = [0u32; 5];
253 nids.as_mut_bytes()
254 .copy_from_slice(&rest[INODE_BLOCK_MAX_ADDR * 4..(INODE_BLOCK_MAX_ADDR + 5) * 4]);
255 let rest = &rest[..INODE_BLOCK_MAX_ADDR * 4];
256
257 let (extra, rest) = if header.inline_flags.contains(InlineFlags::ExtraAttr) {
258 let (extra, _): (Ref<_, InodeExtraAttr>, _) = Ref::from_prefix(rest).unwrap();
259 let extra_size = extra.extra_size as usize;
260 ensure!(extra_size <= rest.len(), "Bad extra_size in inode");
261 (Some((*extra).clone()), &rest[extra_size..])
262 } else {
263 (None, rest)
264 };
265 let rest = if header.inline_flags.contains(InlineFlags::Xattr) {
266 ensure!(
268 rest.len() >= 200,
269 "Insufficient space for inline xattr. Likely bad extra_size."
270 );
271 raw_xattr.extend_from_slice(&rest[rest.len() - 200..]);
272 &rest[..rest.len() - 200]
273 } else {
274 rest
275 };
276
277 let mut inline_data = None;
278 let mut inline_dentry = None;
279 let mut i_addrs: Vec<u32> = Vec::new();
280
281 if header.inline_flags.contains(InlineFlags::Data) {
282 ensure!(rest.len() >= 4, "Invalid inline data (insufficient remaining space)");
284 let data = &rest[4..];
285 ensure!(header.size <= data.len() as u64, "Invalid or corrupt inode.");
286 inline_data = Some(data[..header.size as usize].to_vec().into_boxed_slice());
287 } else if header.inline_flags.contains(InlineFlags::Dentry) {
288 inline_dentry = Some(InlineDentry::try_from_bytes(rest)?);
290 } else {
291 i_addrs.resize(rest.len() / 4, 0);
293 i_addrs.as_mut_bytes().copy_from_slice(&rest[..rest.len() / 4 * 4]);
294 };
295
296 Box::new(Self {
297 header: (*header).clone(),
298 extra,
299 inline_data: inline_data.map(|x| x.into()),
300 inline_dentry,
301 i_addrs,
302 nids,
303 footer: (*footer).clone(),
304
305 nid_pages: HashMap::new(),
306 xattr: vec![],
307 context: None,
308
309 block_addrs,
310 })
311 };
312
313 if this.header.xattr_nid != 0 {
316 raw_xattr.extend_from_slice(f2fs.read_node(this.header.xattr_nid).await?.as_slice());
317 this.block_addrs.push(f2fs.get_nat_entry(this.header.xattr_nid).await?.block_addr);
318 }
319 this.xattr = decode_xattr(&raw_xattr)?;
320
321 this.context = crypto::try_read_context_from_xattr(&this.xattr)?;
322
323 for (i, nid) in this.nids.into_iter().enumerate() {
326 if nid == NULL_ADDR {
327 continue;
328 }
329 match i {
330 0..2 => {
331 this.nid_pages.insert(nid, f2fs.read_node(nid).await?.try_into()?);
332 this.block_addrs.push(f2fs.get_nat_entry(nid).await?.block_addr);
333 }
334 2..4 => {
335 let indirect = Box::<RawAddrBlock>::try_from(f2fs.read_node(nid).await?)?;
336 this.block_addrs.push(f2fs.get_nat_entry(nid).await?.block_addr);
337 for nid in indirect.addrs {
338 if nid != NULL_ADDR {
339 this.nid_pages.insert(nid, f2fs.read_node(nid).await?.try_into()?);
340 this.block_addrs.push(f2fs.get_nat_entry(nid).await?.block_addr);
341 }
342 }
343 this.nid_pages.insert(nid, indirect);
344 }
345 4 => {
346 let double_indirect =
347 Box::<RawAddrBlock>::try_from(f2fs.read_node(nid).await?)?;
348 this.block_addrs.push(f2fs.get_nat_entry(nid).await?.block_addr);
349 for nid in double_indirect.addrs {
350 if nid != NULL_ADDR {
351 let indirect =
352 Box::<RawAddrBlock>::try_from(f2fs.read_node(nid).await?)?;
353 this.block_addrs.push(f2fs.get_nat_entry(nid).await?.block_addr);
354 for nid in indirect.addrs {
355 if nid != NULL_ADDR {
356 this.nid_pages
357 .insert(nid, f2fs.read_node(nid).await?.try_into()?);
358 this.block_addrs
359 .push(f2fs.get_nat_entry(nid).await?.block_addr);
360 }
361 }
362 this.nid_pages.insert(nid, indirect);
363 }
364 }
365 this.nid_pages.insert(nid, double_indirect);
366 }
367 _ => unreachable!(),
368 }
369 }
370
371 Ok(this)
372 }
373
374 pub fn data_blocks(&self) -> DataBlocksIter<'_> {
377 DataBlocksIter {
378 iter: BlockIter { inode: self, stage: 0, offset: 0, a: 0, b: 0, c: 0 },
379 next_block: None,
380 }
381 }
382
383 pub fn data_block_addr(&self, mut block_num: u32) -> u32 {
386 let offset = block_num;
387
388 if block_num < self.i_addrs.len() as u32 {
389 return self.i_addrs[block_num as usize];
390 }
391 block_num -= self.i_addrs.len() as u32;
392
393 const NID0_END: u32 = ADDR_BLOCK_NUM_ADDR;
395 const NID1_END: u32 = NID0_END + ADDR_BLOCK_NUM_ADDR;
396 const NID2_END: u32 = NID1_END + ADDR_BLOCK_NUM_ADDR * ADDR_BLOCK_NUM_ADDR;
397 const NID3_END: u32 = NID2_END + ADDR_BLOCK_NUM_ADDR * ADDR_BLOCK_NUM_ADDR;
398
399 let mut iter = match block_num {
400 ..NID0_END => {
401 let a = block_num;
402 BlockIter { inode: self, stage: 1, offset, a, b: 0, c: 0 }
403 }
404 ..NID1_END => {
405 let a = block_num - NID0_END;
406 BlockIter { inode: self, stage: 2, offset, a, b: 0, c: 0 }
407 }
408 ..NID2_END => {
409 block_num -= NID1_END;
410 let a = block_num / ADDR_BLOCK_NUM_ADDR;
411 let b = block_num % ADDR_BLOCK_NUM_ADDR;
412 BlockIter { inode: self, stage: 3, offset, a, b, c: 0 }
413 }
414 ..NID3_END => {
415 block_num -= NID2_END;
416 let a = block_num / ADDR_BLOCK_NUM_ADDR;
417 let b = block_num % ADDR_BLOCK_NUM_ADDR;
418 BlockIter { inode: self, stage: 4, offset, a, b, c: 0 }
419 }
420 _ => {
421 block_num -= NID3_END;
422 let a = block_num / ADDR_BLOCK_NUM_ADDR / ADDR_BLOCK_NUM_ADDR;
423 let b = (block_num / ADDR_BLOCK_NUM_ADDR) % ADDR_BLOCK_NUM_ADDR;
424 let c = block_num % ADDR_BLOCK_NUM_ADDR;
425 BlockIter { inode: self, stage: 5, offset, a, b, c }
426 }
427 };
428 if let Some((logical, physical)) = iter.next() {
429 if logical == offset { physical } else { NULL_ADDR }
430 } else {
431 NULL_ADDR
432 }
433 }
434}
435
436#[derive(Copy, Clone, Debug, PartialEq, Eq)]
437pub struct DataBlockExtent {
438 pub logical_block_num: u32,
440 pub physical_block_num: u32,
442 pub length: u32,
444}
445
446pub struct DataBlocksIter<'a> {
448 iter: BlockIter<'a>,
449 next_block: Option<(u32, u32)>,
450}
451
452impl Iterator for DataBlocksIter<'_> {
453 type Item = DataBlockExtent;
454 fn next(&mut self) -> Option<Self::Item> {
455 let (log_start, phys_start) = self.next_block.take().or_else(|| self.iter.next())?;
456 let mut len = 1;
457
458 if self.iter.inode.header.size > BLOCK_SIZE as u64 * u32::MAX as u64 {
461 return None;
462 }
463 let file_end = (self.iter.inode.header.size.next_multiple_of(BLOCK_SIZE as u64)
464 / BLOCK_SIZE as u64) as u32;
465
466 loop {
467 match self.iter.next() {
468 Some((log, phys))
469 if Some(log) == log_start.checked_add(len)
470 && Some(phys) == phys_start.checked_add(len)
471 && log != file_end =>
472 {
473 len += 1;
474 }
475 other => {
476 self.next_block = other;
477 return Some(DataBlockExtent {
478 logical_block_num: log_start,
479 physical_block_num: phys_start,
480 length: len,
481 });
482 }
483 }
484 }
485 }
486}
487
488struct BlockIter<'a> {
489 inode: &'a Inode,
490 stage: u32, offset: u32,
492 a: u32, b: u32, c: u32, }
496
497impl Iterator for BlockIter<'_> {
498 type Item = (u32, u32);
499 fn next(&mut self) -> Option<Self::Item> {
500 loop {
501 match self.stage {
502 0 => {
503 while let Some(&addr) = self.inode.i_addrs.get(self.a as usize) {
505 self.a += 1;
506 self.offset += 1;
507 if addr != NULL_ADDR && addr != NEW_ADDR {
508 return Some((self.offset - 1, addr));
509 }
510 }
511 self.stage += 1;
512 self.a = 0;
513 }
514 1..3 => {
515 let nid = self.inode.nids[self.stage as usize - 1];
517
518 if nid == NULL_ADDR || nid == NEW_ADDR {
519 self.stage += 1;
520 self.offset += ADDR_BLOCK_NUM_ADDR;
521 } else {
522 let addrs = self.inode.nid_pages.get(&nid).unwrap().addrs;
523 while let Some(&addr) = addrs.get(self.a as usize) {
524 self.a += 1;
525 self.offset += 1;
526 if addr != NULL_ADDR && addr != NEW_ADDR {
527 return Some((self.offset - 1, addr));
528 }
529 }
530 self.stage += 1;
531 self.a = 0;
532 }
533 }
534
535 3..5 => {
536 let nid = self.inode.nids[self.stage as usize - 1];
537 if nid == NULL_ADDR || nid == NEW_ADDR {
539 self.stage += 1;
540 self.offset += ADDR_BLOCK_NUM_ADDR * ADDR_BLOCK_NUM_ADDR;
541 } else {
542 let addrs = self.inode.nid_pages.get(&nid).unwrap().addrs;
543 while let Some(&nid) = addrs.get(self.a as usize) {
544 if nid == NULL_ADDR || nid == NEW_ADDR {
545 self.a += 1;
546 self.offset += ADDR_BLOCK_NUM_ADDR;
547 } else {
548 let addrs = self.inode.nid_pages.get(&nid).unwrap().addrs;
549 while let Some(&addr) = addrs.get(self.b as usize) {
550 self.b += 1;
551 self.offset += 1;
552 if addr != NULL_ADDR && addr != NEW_ADDR {
553 return Some((self.offset - 1, addr));
554 }
555 }
556 self.a += 1;
557 self.b = 0;
558 }
559 }
560 self.stage += 1;
561 self.a = 0;
562 }
563 }
564
565 5 => {
566 let nid = self.inode.nids[4];
567 if nid != NULL_ADDR && nid != NEW_ADDR {
569 let addrs = self.inode.nid_pages.get(&nid).unwrap().addrs;
570 while let Some(&nid) = addrs.get(self.a as usize) {
571 if nid == NULL_ADDR || nid == NEW_ADDR {
572 self.a += 1;
573 self.offset += ADDR_BLOCK_NUM_ADDR * ADDR_BLOCK_NUM_ADDR;
574 } else {
575 let addrs = self.inode.nid_pages.get(&nid).unwrap().addrs;
576 while let Some(&nid) = addrs.get(self.b as usize) {
577 if nid == NULL_ADDR || nid == NEW_ADDR {
578 self.b += 1;
579 self.offset += ADDR_BLOCK_NUM_ADDR;
580 } else {
581 let addrs = self.inode.nid_pages.get(&nid).unwrap().addrs;
582 while let Some(&addr) = addrs.get(self.c as usize) {
583 self.c += 1;
584 self.offset += 1;
585 if addr != NULL_ADDR && addr != NEW_ADDR {
586 return Some((self.offset - 1, addr));
587 }
588 }
589 self.b += 1;
590 self.c = 0;
591 }
592 }
593
594 self.a += 1;
595 self.b = 0;
596 }
597 }
598 }
599 self.stage += 1;
600 }
601 _ => {
602 break;
603 }
604 }
605 }
606 None
607 }
608}
609
610#[cfg(test)]
611mod test {
612 use super::*;
613 use crate::nat::RawNatEntry;
614 use crate::reader;
615 use anyhow;
616 use async_trait::async_trait;
617 use storage_device::buffer_allocator::{BufferAllocator, BufferSource};
618 use zerocopy::FromZeros;
619
620 struct FakeReader {
622 data: HashMap<u32, Box<[u8; 4096]>>,
623 nids: HashMap<u32, Box<[u8; 4096]>>,
624 allocator: BufferAllocator,
625 }
626
627 #[async_trait]
628 impl reader::Reader for FakeReader {
629 async fn read_raw_block(&self, block_addr: u32) -> Result<Buffer<'_>, Error> {
630 match self.data.get(&block_addr) {
631 None => Err(anyhow!("unexpected block {block_addr}")),
632 Some(value) => {
633 let mut block = self.allocator.allocate_buffer(BLOCK_SIZE).await;
634 block.as_mut_slice().copy_from_slice(value.as_ref());
635 Ok(block)
636 }
637 }
638 }
639
640 async fn read_node(&self, nid: u32) -> Result<Buffer<'_>, Error> {
641 match self.nids.get(&nid) {
642 None => Err(anyhow!("unexpected nid {nid}")),
643 Some(value) => {
644 let mut block = self.allocator.allocate_buffer(BLOCK_SIZE).await;
645 block.as_mut_slice().copy_from_slice(value.as_ref());
646 Ok(block)
647 }
648 }
649 }
650
651 fn fs_uuid(&self) -> &[u8; 16] {
652 &[0; 16]
653 }
654
655 async fn get_nat_entry(&self, nid: u32) -> Result<RawNatEntry, Error> {
656 Ok(RawNatEntry { ino: nid, block_addr: 0, ..Default::default() })
657 }
658 }
659
660 fn build_inode(ino: u32) -> Box<[u8; BLOCK_SIZE]> {
662 let mut header = InodeHeader::new_zeroed();
663 let mut footer = InodeFooter::new_zeroed();
664 let mut extra = InodeExtraAttr::new_zeroed();
665
666 extra.extra_size = std::mem::size_of::<InodeExtraAttr>().try_into().unwrap();
667
668 header.mode = Mode::RegularFile;
669 header.inline_flags.set(InlineFlags::ExtraAttr, true);
670 header.inline_flags.set(InlineFlags::Xattr, true);
671 footer.ino = ino;
672
673 let mut out = [0u8; BLOCK_SIZE];
674 out[..std::mem::size_of::<InodeHeader>()].copy_from_slice(&header.as_bytes());
675 out[std::mem::size_of::<InodeHeader>()
676 ..std::mem::size_of::<InodeHeader>() + std::mem::size_of::<InodeExtraAttr>()]
677 .copy_from_slice(&extra.as_bytes());
678 out[BLOCK_SIZE - std::mem::size_of::<InodeFooter>()..].copy_from_slice(&footer.as_bytes());
679 Box::new(out)
680 }
681
682 #[fuchsia::test]
683 async fn test_xattr_bounds() {
684 let mut reader = FakeReader {
685 data: [].into(),
686 nids: [(1, build_inode(1)), (2, [0u8; 4096].into()), (3, [0u8; 4096].into())].into(),
687 allocator: BufferAllocator::new(BLOCK_SIZE, BufferSource::new(BLOCK_SIZE * 10)),
688 };
689 assert!(Inode::try_load(&reader, 1).await.is_ok());
690
691 let header_len = std::mem::size_of::<InodeHeader>();
692 let footer_len = std::mem::size_of::<InodeFooter>();
693 let nids_len = std::mem::size_of::<u32>() * 5;
694 let overheads = header_len + footer_len + nids_len;
695
696 let mut extra = InodeExtraAttr::new_zeroed();
698 extra.extra_size = (BLOCK_SIZE - overheads - 200) as u16;
699 reader.nids.get_mut(&1).unwrap()[std::mem::size_of::<InodeHeader>()
700 ..std::mem::size_of::<InodeHeader>() + std::mem::size_of::<InodeExtraAttr>()]
701 .copy_from_slice(&extra.as_bytes());
702 assert!(Inode::try_load(&reader, 1).await.is_ok());
703
704 let mut extra = InodeExtraAttr::new_zeroed();
706 extra.extra_size = (BLOCK_SIZE - overheads - 199) as u16;
707 reader.nids.get_mut(&1).unwrap()[std::mem::size_of::<InodeHeader>()
708 ..std::mem::size_of::<InodeHeader>() + std::mem::size_of::<InodeExtraAttr>()]
709 .copy_from_slice(&extra.as_bytes());
710 assert!(Inode::try_load(&reader, 1).await.is_err());
711 }
712}
713
714#[cfg(test)]
715mod tests {
716 use zerocopy::FromZeros;
717
718 use super::*;
719
720 fn last_addr_block(addr: u32) -> Box<RawAddrBlock> {
721 let mut addr_block = RawAddrBlock::new_zeroed();
722 addr_block.addrs[ADDR_BLOCK_NUM_ADDR as usize - 1] = addr;
723 Box::new(addr_block)
724 }
725
726 #[test]
727 fn test_data_iter() {
728 let header = InodeHeader::new_zeroed();
735 let footer = InodeFooter::new_zeroed();
736 let mut nids = [0u32; 5];
737 let mut nid_pages = HashMap::new();
738 nid_pages.insert(101, last_addr_block(1001));
739 nid_pages.insert(102, last_addr_block(1002));
740
741 let mut i_addrs: Vec<u32> = Vec::new();
742 i_addrs.resize(INODE_BLOCK_MAX_ADDR, 0);
743 i_addrs[0] = 100;
744 i_addrs[1] = 101;
745 i_addrs[2] = 102;
746 i_addrs[INODE_BLOCK_MAX_ADDR - 1] = 1000;
747
748 nids[0] = 101;
749 nid_pages.insert(101, last_addr_block(1001));
750
751 nids[1] = 102;
752 nid_pages.insert(102, last_addr_block(1002));
753
754 nids[2] = 103;
755 nid_pages.insert(103, last_addr_block(104));
756 nid_pages.insert(104, last_addr_block(1003));
757
758 nids[3] = 105;
759 nid_pages.insert(105, last_addr_block(106));
760 nid_pages.insert(106, last_addr_block(1004));
761
762 nids[4] = 107;
763 nid_pages.insert(107, last_addr_block(108));
764 nid_pages.insert(108, last_addr_block(109));
765 nid_pages.insert(109, last_addr_block(1005));
766
767 let inode = Box::new(Inode {
768 header,
769 extra: None,
770 inline_data: None,
771 inline_dentry: None,
772 i_addrs,
773 nids,
774 footer: footer,
775
776 nid_pages,
777 xattr: vec![],
778 context: None,
779
780 block_addrs: vec![],
781 });
782
783 assert_eq!(inode.data_block_addr(0), 100);
785
786 let mut iter = inode.data_blocks();
787 assert_eq!(
788 iter.next(),
789 Some(DataBlockExtent { logical_block_num: 0, physical_block_num: 100, length: 3 })
790 );
791
792 let mut block_num = 922;
793 assert_eq!(
794 iter.next(),
795 Some(DataBlockExtent {
796 logical_block_num: block_num,
797 physical_block_num: 1000,
798 length: 1
799 })
800 ); assert_eq!(inode.data_block_addr(block_num), 1000);
802 block_num += ADDR_BLOCK_NUM_ADDR;
803 assert_eq!(
804 iter.next(),
805 Some(DataBlockExtent {
806 logical_block_num: block_num,
807 physical_block_num: 1001,
808 length: 1
809 })
810 ); assert_eq!(inode.data_block_addr(block_num), 1001);
812 block_num += ADDR_BLOCK_NUM_ADDR;
813 assert_eq!(
814 iter.next(),
815 Some(DataBlockExtent {
816 logical_block_num: block_num,
817 physical_block_num: 1002,
818 length: 1
819 })
820 ); assert_eq!(inode.data_block_addr(block_num), 1002);
822 block_num += ADDR_BLOCK_NUM_ADDR * ADDR_BLOCK_NUM_ADDR;
823 assert_eq!(
824 iter.next(),
825 Some(DataBlockExtent {
826 logical_block_num: block_num,
827 physical_block_num: 1003,
828 length: 1
829 })
830 ); assert_eq!(inode.data_block_addr(block_num), 1003);
832 block_num += ADDR_BLOCK_NUM_ADDR * ADDR_BLOCK_NUM_ADDR;
833 assert_eq!(
834 iter.next(),
835 Some(DataBlockExtent {
836 logical_block_num: block_num,
837 physical_block_num: 1004,
838 length: 1
839 })
840 ); assert_eq!(inode.data_block_addr(block_num), 1004);
842 block_num += ADDR_BLOCK_NUM_ADDR * ADDR_BLOCK_NUM_ADDR * ADDR_BLOCK_NUM_ADDR;
843 assert_eq!(
844 iter.next(),
845 Some(DataBlockExtent {
846 logical_block_num: block_num,
847 physical_block_num: 1005,
848 length: 1
849 })
850 ); assert_eq!(inode.data_block_addr(block_num), 1005);
852 assert_eq!(iter.next(), None);
853 assert_eq!(inode.data_block_addr(block_num - 1), 0);
854 assert_eq!(inode.data_block_addr(block_num + 1), 0);
855 }
856}