1use crate::util;
8use thiserror::Error;
9
10#[derive(Error, Debug)]
12pub enum ElfLoadError {
13 #[error("ELF load segments were empty")]
14 NothingToLoad,
15 #[error("Failed to allocate VMAR for ELF: {}", _0)]
16 VmarAllocate(zx::Status),
17 #[error("Failed to map VMAR: {}", _0)]
18 VmarMap(zx::Status),
19 #[error("Failed to create CoW VMO clone: {}", _0)]
20 VmoCowClone(zx::Status),
21 #[error("Failed to create VMO: {}", _0)]
22 VmoCreate(zx::Status),
23 #[error("Failed to read from VMO: {}", _0)]
24 VmoRead(zx::Status),
25 #[error("Failed to write to VMO: {}", _0)]
26 VmoWrite(zx::Status),
27 #[error("Failed to get VMO name: {}", _0)]
28 GetVmoName(zx::Status),
29 #[error("Failed to set VMO name: {}", _0)]
30 SetVmoName(zx::Status),
31}
32
33impl ElfLoadError {
34 pub fn as_zx_status(&self) -> zx::Status {
36 match self {
37 ElfLoadError::NothingToLoad => zx::Status::NOT_FOUND,
38 ElfLoadError::VmarAllocate(s)
39 | ElfLoadError::VmarMap(s)
40 | ElfLoadError::VmoCowClone(s)
41 | ElfLoadError::VmoCreate(s)
42 | ElfLoadError::VmoRead(s)
43 | ElfLoadError::VmoWrite(s)
44 | ElfLoadError::GetVmoName(s)
45 | ElfLoadError::SetVmoName(s) => *s,
46 }
47 }
48}
49
/// Address-space requirements of an ELF's load segments, as computed by
/// [`loaded_elf_info`].
#[derive(Debug)]
pub struct LoadedElfInfo {
    // Page-aligned start of the lowest load segment.
    pub low: usize,

    // Page-aligned end of the highest load segment.
    pub high: usize,

    // Union of the access permission flags across all load segments.
    pub max_perm: elf_parse::SegmentFlags,
}
62
63pub fn loaded_elf_info(headers: &elf_parse::Elf64Headers) -> LoadedElfInfo {
67 let (mut first, mut low, mut high) = (true, 0, 0);
68 let mut max_perm = elf_parse::SegmentFlags::empty();
69 for hdr in headers.program_headers_with_type(elf_parse::SegmentType::Load) {
70 if first {
72 low = util::page_start(hdr.vaddr);
73 first = false;
74 }
75 high = util::page_end(hdr.vaddr + hdr.memsz as usize);
76 max_perm |= hdr.flags();
77 }
78 LoadedElfInfo { low, high, max_perm }
79}
80
/// The result of successfully loading an ELF with [`load_elf`].
#[derive(Debug)]
pub struct LoadedElf {
    // The sub-VMAR the ELF segments were mapped into.
    pub vmar: zx::Vmar,

    // The base address of that VMAR.
    pub vmar_base: usize,

    // The ELF entry point, adjusted by the load-time address bias.
    pub entry: usize,
}
93
/// Abstraction over `zx::Vmar::map` so segment-mapping logic can be driven
/// against a fake in tests (see `TrackingMapper` in the test module).
pub trait Mapper {
    /// Maps `length` bytes of `vmo`, starting at `vmo_offset`, at
    /// `vmar_offset` relative to this mapper's base, with `flags`.
    /// Returns the mapped address on success.
    fn map(
        &self,
        vmar_offset: usize,
        vmo: &zx::Vmo,
        vmo_offset: u64,
        length: usize,
        flags: zx::VmarFlags,
    ) -> Result<usize, zx::Status>;
}
108
109impl Mapper for zx::Vmar {
110 fn map(
111 &self,
112 vmar_offset: usize,
113 vmo: &zx::Vmo,
114 vmo_offset: u64,
115 length: usize,
116 flags: zx::VmarFlags,
117 ) -> Result<usize, zx::Status> {
118 Self::map(self, vmar_offset, vmo, vmo_offset, length, flags)
119 }
120}
121
122pub fn load_elf(
124 vmo: &zx::Vmo,
125 headers: &elf_parse::Elf64Headers,
126 root_vmar: &zx::Vmar,
127) -> Result<LoadedElf, ElfLoadError> {
128 let info = loaded_elf_info(headers);
129 let size = info.high - info.low;
130 if size == 0 {
131 return Err(ElfLoadError::NothingToLoad);
132 }
133
134 let flags = zx::VmarFlags::CAN_MAP_SPECIFIC | elf_to_vmar_can_map_flags(&info.max_perm);
137 let (vmar, vmar_base) =
138 root_vmar.allocate(0, size, flags).map_err(|s| ElfLoadError::VmarAllocate(s))?;
139
140 let vaddr_bias = vmar_base.wrapping_sub(info.low);
144
145 map_elf_segments(vmo, headers, &vmar, vmar_base, vaddr_bias)?;
146 Ok(LoadedElf { vmar, vmar_base, entry: headers.file_header().entry.wrapping_add(vaddr_bias) })
147}
148
/// Maps all `PT_LOAD` segments of `headers` from `vmo` through `mapper`.
///
/// `vaddr_bias` is added to each segment's virtual address to get its final
/// process address; `mapper_base` is then subtracted because `Mapper::map`
/// takes offsets relative to the mapper's own base. Read-only segments are
/// mapped directly from `vmo`. Segments that must be modified — writable
/// data, or a BSS that begins mid-page so the page tail needs zeroing — are
/// mapped from a copy-on-write child, leaving `vmo` itself untouched.
pub fn map_elf_segments(
    vmo: &zx::Vmo,
    headers: &elf_parse::Elf64Headers,
    mapper: &dyn Mapper,
    mapper_base: usize,
    vaddr_bias: usize,
) -> Result<(), ElfLoadError> {
    // Convert the absolute bias into one relative to the mapper's base.
    // Wrapping arithmetic: the bias is effectively a signed offset.
    let mapper_relative_bias = vaddr_bias.wrapping_sub(mapper_base);
    // Base name used to derive "data:"/"bss:" names for VMOs created below.
    let vmo_name = vmo.get_name().map_err(|s| ElfLoadError::GetVmoName(s))?;
    for hdr in headers.program_headers_with_type(elf_parse::SegmentType::Load) {
        // Align the file offset and vaddr down to a page boundary, growing
        // the file and memory sizes by the same amount to compensate.
        let adjust = util::page_offset(hdr.offset);
        let mut file_offset = hdr.offset - adjust;
        let file_size = hdr.filesz + adjust as u64;
        let virt_offset = hdr.vaddr - adjust;
        let virt_size = hdr.memsz + adjust as u64;

        let virt_addr = virt_offset.wrapping_add(mapper_relative_bias);

        // If the BSS starts partway through the last file-backed page, that
        // page's tail must be zeroed, which requires a writable VMO.
        let must_write = virt_size > file_size && util::page_offset(file_size as usize) != 0;

        // `writeable_vmo` is declared in the outer scope so that the
        // reference stored in `vmo_to_map` stays valid for the map calls.
        let vmo_to_map: &zx::Vmo;
        let writeable_vmo: zx::Vmo;
        if must_write || (file_size > 0 && hdr.flags().contains(elf_parse::SegmentFlags::WRITE)) {
            // Clone just this segment's pages copy-on-write so that neither
            // the zero-fill below nor runtime writes modify the original VMO.
            writeable_vmo = vmo
                .create_child(
                    zx::VmoChildOptions::SNAPSHOT_AT_LEAST_ON_WRITE,
                    file_offset as u64,
                    util::page_end(file_size as usize) as u64,
                )
                .map_err(ElfLoadError::VmoCowClone)?;
            writeable_vmo
                .set_name(&vmo_name_with_prefix(&vmo_name, VMO_NAME_PREFIX_DATA))
                .map_err(ElfLoadError::SetVmoName)?;
            // The child starts at the segment's first page, so subsequent
            // mapping begins at offset 0 within it.
            file_offset = 0;

            if virt_size > file_size {
                // Zero from the end of the file data to the end of its page;
                // the COW child still holds the original file bytes there.
                let memset_size = util::page_end(file_size as usize) - file_size as usize;
                if memset_size > 0 {
                    writeable_vmo
                        .write(&vec![0u8; memset_size], file_size)
                        .map_err(|s| ElfLoadError::VmoWrite(s))?;
                }
            }
            vmo_to_map = &writeable_vmo;
        } else {
            vmo_to_map = vmo;
        }

        let flags = zx::VmarFlags::SPECIFIC
            | zx::VmarFlags::ALLOW_FAULTS
            | elf_to_vmar_perm_flags(&hdr.flags());
        // Map the file-backed portion of the segment, if there is one.
        if file_size != 0 {
            mapper
                .map(
                    virt_addr,
                    vmo_to_map,
                    file_offset as u64,
                    util::page_end(file_size as usize),
                    flags,
                )
                .map_err(ElfLoadError::VmarMap)?;
        }

        // Map any whole pages of BSS beyond the file data from a fresh
        // anonymous VMO, which reads as zeros by construction.
        if virt_size > file_size {
            let bss_vmo_start = util::page_end(file_size as usize);
            let bss_vmo_size = util::page_end(virt_size as usize) - bss_vmo_start;
            if bss_vmo_size > 0 {
                let anon_vmo =
                    zx::Vmo::create(bss_vmo_size as u64).map_err(|s| ElfLoadError::VmoCreate(s))?;
                anon_vmo
                    .set_name(&vmo_name_with_prefix(&vmo_name, VMO_NAME_PREFIX_BSS))
                    .map_err(ElfLoadError::SetVmoName)?;
                mapper
                    .map(virt_addr + bss_vmo_start, &anon_vmo, 0, bss_vmo_size, flags)
                    .map_err(ElfLoadError::VmarMap)?;
            }
        }
    }
    Ok(())
}
258
// Name prefixes applied to loader-created VMOs so memory-attribution tools
// can distinguish them from the original ELF VMO they derive from.
const VMO_NAME_PREFIX_BSS: &str = "bss:";
const VMO_NAME_PREFIX_DATA: &str = "data:";
262
263fn vmo_name_with_prefix(name: &zx::Name, prefix: &str) -> zx::Name {
265 assert!(prefix.len() <= zx::sys::ZX_MAX_NAME_LEN - 1);
266 if name.is_empty() {
267 zx::Name::new_lossy(&format!("{prefix}<unknown ELF>"))
268 } else {
269 zx::Name::new_lossy(&format!("{prefix}{name}"))
270 }
271}
272
273fn elf_to_vmar_can_map_flags(elf_flags: &elf_parse::SegmentFlags) -> zx::VmarFlags {
274 let mut flags = zx::VmarFlags::empty();
275 if elf_flags.contains(elf_parse::SegmentFlags::READ) {
276 flags |= zx::VmarFlags::CAN_MAP_READ;
277 }
278 if elf_flags.contains(elf_parse::SegmentFlags::WRITE) {
279 flags |= zx::VmarFlags::CAN_MAP_WRITE;
280 }
281 if elf_flags.contains(elf_parse::SegmentFlags::EXECUTE) {
282 flags |= zx::VmarFlags::CAN_MAP_EXECUTE | zx::VmarFlags::CAN_MAP_READ;
283 }
284 flags
285}
286
287fn elf_to_vmar_perm_flags(elf_flags: &elf_parse::SegmentFlags) -> zx::VmarFlags {
288 let mut flags = zx::VmarFlags::empty();
289 if elf_flags.contains(elf_parse::SegmentFlags::READ) {
290 flags |= zx::VmarFlags::PERM_READ;
291 }
292 if elf_flags.contains(elf_parse::SegmentFlags::WRITE) {
293 flags |= zx::VmarFlags::PERM_WRITE;
294 }
295 if elf_flags.contains(elf_parse::SegmentFlags::EXECUTE) {
296 flags |= zx::VmarFlags::PERM_EXECUTE | zx::VmarFlags::PERM_READ_IF_XOM_UNSUPPORTED;
297 }
298 flags
299}
300
301#[cfg(test)]
302mod tests {
303 use super::*;
304 use assert_matches::assert_matches;
305 use fidl::HandleBased;
306 use std::cell::RefCell;
307 use std::mem::size_of;
308 use std::sync::LazyLock;
309
    // Checks prefixing and truncation behavior of `vmo_name_with_prefix`:
    // empty names get the "<unknown ELF>" placeholder, and results longer
    // than the maximum object name length are truncated lossily.
    #[test]
    fn test_vmo_name_with_prefix() {
        let empty_vmo_name = zx::Name::default();
        let short_vmo_name = zx::Name::new("short_vmo_name").unwrap();
        let max_vmo_name = zx::Name::new("a_great_maximum_length_vmo_name").unwrap();

        assert_eq!(vmo_name_with_prefix(&empty_vmo_name, VMO_NAME_PREFIX_BSS), "bss:<unknown ELF>");
        assert_eq!(
            vmo_name_with_prefix(&short_vmo_name, VMO_NAME_PREFIX_BSS),
            "bss:short_vmo_name",
        );
        // Maximum-length names lose their tail to make room for the prefix.
        assert_eq!(
            vmo_name_with_prefix(&max_vmo_name, VMO_NAME_PREFIX_BSS),
            "bss:a_great_maximum_length_vmo_",
        );
        assert_eq!(
            vmo_name_with_prefix(&max_vmo_name, VMO_NAME_PREFIX_DATA),
            "data:a_great_maximum_length_vmo",
        );

        // A long prefix is kept intact; the name/placeholder is what is cut.
        assert_eq!(
            vmo_name_with_prefix(&empty_vmo_name, "a_long_vmo_name_prefix:"),
            "a_long_vmo_name_prefix:<unknown",
        );
        assert_eq!(
            vmo_name_with_prefix(&empty_vmo_name, "a_great_maximum_length_vmo_name"),
            max_vmo_name,
        );
        assert_eq!(
            vmo_name_with_prefix(&max_vmo_name, "anystringhere"),
            "anystringherea_great_maximum_le"
        );
    }
343
    /// A single call made to `TrackingMapper::map`, captured for inspection.
    #[derive(Debug)]
    struct RecordedMapping {
        // Duplicate handle of the VMO passed to `map`.
        vmo: zx::Vmo,
        vmo_offset: u64,
        length: usize,
        flags: zx::VmarFlags,
    }
351
    /// A test `Mapper` that records every requested mapping instead of
    /// performing any actual VMAR operation.
    struct TrackingMapper(RefCell<Vec<RecordedMapping>>);

    impl TrackingMapper {
        fn new() -> Self {
            Self(RefCell::new(Vec::new()))
        }
    }
360
    // Consuming iteration over the recorded mappings, in call order.
    impl IntoIterator for TrackingMapper {
        type Item = RecordedMapping;
        type IntoIter = std::vec::IntoIter<Self::Item>;

        fn into_iter(self) -> Self::IntoIter {
            self.0.into_inner().into_iter()
        }
    }
369
    impl Mapper for TrackingMapper {
        // Records the requested mapping and pretends it succeeded, echoing
        // `vmar_offset` back as the "mapped" address.
        fn map(
            &self,
            vmar_offset: usize,
            vmo: &zx::Vmo,
            vmo_offset: u64,
            length: usize,
            flags: zx::VmarFlags,
        ) -> Result<usize, zx::Status> {
            self.0.borrow_mut().push(RecordedMapping {
                // Duplicate the handle so tests can inspect the VMO later.
                vmo: vmo.duplicate(zx::Rights::SAME_RIGHTS).unwrap(),
                vmo_offset,
                length,
                flags,
            });
            Ok(vmar_offset)
        }
    }
388
    /// A minimal valid 64-bit ELF file header for tests, declaring a single
    /// program header located immediately after this header (`phoff`).
    const ELF_FILE_HEADER: &elf_parse::Elf64FileHeader = &elf_parse::Elf64FileHeader {
        ident: elf_parse::ElfIdent {
            magic: elf_parse::ELF_MAGIC,
            class: elf_parse::ElfClass::Elf64 as u8,
            data: elf_parse::NATIVE_ENCODING as u8,
            version: elf_parse::ElfVersion::Current as u8,
            osabi: 0x00,
            abiversion: 0x00,
            pad: [0; 7],
        },
        elf_type: elf_parse::ElfType::SharedObject as u16,
        machine: elf_parse::CURRENT_ARCH as u16,
        version: elf_parse::ElfVersion::Current as u32,
        entry: 0x10000,
        phoff: size_of::<elf_parse::Elf64FileHeader>(),
        shoff: 0,
        flags: 0,
        ehsize: size_of::<elf_parse::Elf64FileHeader>() as u16,
        phentsize: size_of::<elf_parse::Elf64ProgramHeader>() as u16,
        phnum: 1,
        shentsize: 0,
        shnum: 0,
        shstrndx: 0,
    };
414
    // The rights a freshly created VMO handle is expected to carry
    // (NOTE(review): assumed to mirror zx_vmo_create defaults — confirm
    // against the Zircon rights documentation). Tests subtract WRITE from
    // this set to produce read-only ELF VMOs.
    const VMO_DEFAULT_RIGHTS: zx::Rights = zx::Rights::from_bits_truncate(
        zx::Rights::DUPLICATE.bits()
            | zx::Rights::TRANSFER.bits()
            | zx::Rights::READ.bits()
            | zx::Rights::WRITE.bits()
            | zx::Rights::MAP.bits()
            | zx::Rights::GET_PROPERTY.bits()
            | zx::Rights::SET_PROPERTY.bits(),
    );
426
    // A read-only segment whose BSS begins mid-page (filesz 8, memsz 0x100):
    // the loader must map a COW copy with the page tail zeroed rather than
    // the original (read-only) VMO.
    #[test]
    fn map_read_only_with_page_unaligned_bss() {
        const ELF_DATA: &[u8; 8] = b"FUCHSIA!";

        static PAGE_SIZE: LazyLock<usize> = LazyLock::new(|| zx::system_get_page_size() as usize);
        // One R+X load segment with its file contents at file offset PAGE_SIZE.
        static ELF_PROGRAM_HEADER: LazyLock<elf_parse::Elf64ProgramHeader> =
            LazyLock::new(|| elf_parse::Elf64ProgramHeader {
                segment_type: elf_parse::SegmentType::Load as u32,
                flags: elf_parse::SegmentFlags::from_bits_truncate(
                    elf_parse::SegmentFlags::READ.bits() | elf_parse::SegmentFlags::EXECUTE.bits(),
                )
                .bits(),
                offset: *PAGE_SIZE,
                vaddr: 0x10000,
                paddr: 0x10000,
                filesz: ELF_DATA.len() as u64,
                memsz: 0x100,
                align: *PAGE_SIZE as u64,
            });
        let headers = elf_parse::Elf64Headers::new_for_test(
            ELF_FILE_HEADER,
            Some(std::slice::from_ref(&ELF_PROGRAM_HEADER)),
        );
        let vmo = zx::Vmo::create(*PAGE_SIZE as u64 * 2).expect("create VMO");

        // Fill with 0xff so any bytes the loader fails to zero are visible.
        let data = vec![0xff; *PAGE_SIZE * 2];
        vmo.write(&data, 0).expect("fill VMO with 0xff");
        vmo.write(ELF_DATA, *PAGE_SIZE as u64).expect("write data to VMO");

        // Drop WRITE so zeroing must happen on a COW child, not this VMO.
        let vmo =
            vmo.replace_handle(VMO_DEFAULT_RIGHTS - zx::Rights::WRITE).expect("remove WRITE right");

        let mapper = TrackingMapper::new();
        map_elf_segments(&vmo, &headers, &mapper, 0, 0).expect("map ELF segments");

        let mut mapping_iter = mapper.into_iter();

        let mapping = mapping_iter.next().expect("mapping from ELF VMO");

        let mut data = vec![0; *PAGE_SIZE];
        mapping.vmo.read(&mut data, mapping.vmo_offset).expect("read VMO");

        // Expect the file data followed by zeros to the end of the page.
        let expected = ELF_DATA
            .into_iter()
            .cloned()
            .chain(std::iter::repeat(0).take(*PAGE_SIZE - ELF_DATA.len()))
            .collect::<Vec<u8>>();

        assert_eq!(&expected, &data);

        // The whole segment fits in one page, so there is no second mapping.
        assert_matches!(mapping_iter.next(), None);
    }
488
    // A read-only segment whose file portion ends exactly on a page boundary
    // (filesz = one page, memsz = two pages): the file page maps directly
    // from the original VMO and the BSS page comes from an anonymous VMO.
    #[test]
    fn map_read_only_vmo_with_page_aligned_bss() {
        static PAGE_SIZE: LazyLock<usize> = LazyLock::new(|| zx::system_get_page_size() as usize);
        static ELF_PROGRAM_HEADER: LazyLock<elf_parse::Elf64ProgramHeader> =
            LazyLock::new(|| elf_parse::Elf64ProgramHeader {
                segment_type: elf_parse::SegmentType::Load as u32,
                flags: elf_parse::SegmentFlags::from_bits_truncate(
                    elf_parse::SegmentFlags::READ.bits() | elf_parse::SegmentFlags::EXECUTE.bits(),
                )
                .bits(),
                offset: *PAGE_SIZE,
                vaddr: 0x10000,
                paddr: 0x10000,
                filesz: *PAGE_SIZE as u64,
                memsz: *PAGE_SIZE as u64 * 2,
                align: *PAGE_SIZE as u64,
            });
        let headers = elf_parse::Elf64Headers::new_for_test(
            ELF_FILE_HEADER,
            Some(std::slice::from_ref(&ELF_PROGRAM_HEADER)),
        );
        let vmo = zx::Vmo::create(*PAGE_SIZE as u64 * 2).expect("create VMO");
        let pattern = vec![0xff; *PAGE_SIZE * 2];
        vmo.write(&pattern, 0).expect("fill VMO with 0xff");

        // Read-only handle: no zeroing is needed, so no COW copy either.
        let vmo =
            vmo.replace_handle(VMO_DEFAULT_RIGHTS - zx::Rights::WRITE).expect("remove WRITE right");

        let mapper = TrackingMapper::new();
        map_elf_segments(&vmo, &headers, &mapper, 0, 0).expect("map ELF segments");

        let mut mapping_iter = mapper.into_iter();

        // First mapping must be the original ELF VMO itself (same koid).
        let mapping = mapping_iter.next().expect("mapping from ELF VMO");
        assert_eq!(mapping.vmo.koid().unwrap(), vmo.koid().unwrap());

        let mut data = vec![0; *PAGE_SIZE];

        mapping.vmo.read(&mut data, mapping.vmo_offset).expect("read ELF VMO");
        assert_eq!(&data, &pattern[0..*PAGE_SIZE]);

        // Second mapping is the anonymous BSS VMO, which reads as zeros.
        let mapping = mapping_iter.next().expect("mapping from BSS VMO");

        mapping.vmo.read(&mut data, mapping.vmo_offset).expect("read BSS VMO");
        let zero = vec![0; *PAGE_SIZE];
        assert_eq!(&data, &zero);

        assert_matches!(mapping_iter.next(), None);
    }
548
    // A read-only segment with no BSS at all (filesz == memsz): exactly one
    // mapping is made, directly from the original VMO.
    #[test]
    fn map_read_only_vmo_with_no_bss() {
        static PAGE_SIZE: LazyLock<usize> = LazyLock::new(|| zx::system_get_page_size() as usize);
        static ELF_PROGRAM_HEADER: LazyLock<elf_parse::Elf64ProgramHeader> =
            LazyLock::new(|| elf_parse::Elf64ProgramHeader {
                segment_type: elf_parse::SegmentType::Load as u32,
                flags: elf_parse::SegmentFlags::from_bits_truncate(
                    elf_parse::SegmentFlags::READ.bits() | elf_parse::SegmentFlags::EXECUTE.bits(),
                )
                .bits(),
                offset: *PAGE_SIZE,
                vaddr: 0x10000,
                paddr: 0x10000,
                filesz: *PAGE_SIZE as u64,
                memsz: *PAGE_SIZE as u64,
                align: *PAGE_SIZE as u64,
            });
        let headers = elf_parse::Elf64Headers::new_for_test(
            ELF_FILE_HEADER,
            Some(std::slice::from_ref(&ELF_PROGRAM_HEADER)),
        );
        let vmo = zx::Vmo::create(*PAGE_SIZE as u64 * 2).expect("create VMO");
        let pattern = vec![0xff; *PAGE_SIZE * 2];
        vmo.write(&pattern, 0).expect("fill VMO with 0xff");

        // Read-only handle: the loader has no reason to copy or write.
        let vmo =
            vmo.replace_handle(VMO_DEFAULT_RIGHTS - zx::Rights::WRITE).expect("remove WRITE right");

        let mapper = TrackingMapper::new();
        map_elf_segments(&vmo, &headers, &mapper, 0, 0).expect("map ELF segments");

        let mut mapping_iter = mapper.into_iter();

        // The single mapping must be the original ELF VMO (same koid).
        let mapping = mapping_iter.next().expect("mapping from ELF VMO");
        assert_eq!(mapping.vmo.koid().unwrap(), vmo.koid().unwrap());

        let mut data = vec![0; *PAGE_SIZE];

        mapping.vmo.read(&mut data, mapping.vmo_offset).expect("read ELF VMO");
        assert_eq!(&data, &pattern[0..*PAGE_SIZE]);

        assert_matches!(mapping_iter.next(), None);
    }
601
    // A writable (R+W) segment backed by a read-only VMO handle: the loader
    // must map a COW child (different koid) that is itself writable.
    #[test]
    fn map_read_only_vmo_with_write_flag() {
        static PAGE_SIZE: LazyLock<usize> = LazyLock::new(|| zx::system_get_page_size() as usize);
        static ELF_PROGRAM_HEADER: LazyLock<elf_parse::Elf64ProgramHeader> =
            LazyLock::new(|| elf_parse::Elf64ProgramHeader {
                segment_type: elf_parse::SegmentType::Load as u32,
                flags: elf_parse::SegmentFlags::from_bits_truncate(
                    elf_parse::SegmentFlags::READ.bits() | elf_parse::SegmentFlags::WRITE.bits(),
                )
                .bits(),
                offset: *PAGE_SIZE,
                vaddr: 0x10000,
                paddr: 0x10000,
                filesz: *PAGE_SIZE as u64,
                memsz: *PAGE_SIZE as u64,
                align: *PAGE_SIZE as u64,
            });
        let headers = elf_parse::Elf64Headers::new_for_test(
            ELF_FILE_HEADER,
            Some(std::slice::from_ref(&ELF_PROGRAM_HEADER)),
        );
        let vmo = zx::Vmo::create(*PAGE_SIZE as u64 * 2).expect("create VMO");

        // Strip WRITE from the source handle to prove the COW child is used.
        let vmo =
            vmo.replace_handle(VMO_DEFAULT_RIGHTS - zx::Rights::WRITE).expect("remove WRITE right");

        let mapper = TrackingMapper::new();
        map_elf_segments(&vmo, &headers, &mapper, 0, 0).expect("map ELF segments");

        let mut mapping_iter = mapper.into_iter();

        // The mapped VMO must NOT be the original (a COW clone was made).
        let mapping = mapping_iter.next().expect("mapping from ELF VMO");
        assert_ne!(mapping.vmo.koid().unwrap(), vmo.koid().unwrap());

        // And the clone is writable even though the source handle was not.
        mapping.vmo.write(b"FUCHSIA!", mapping.vmo_offset).expect("write to COW VMO");

        assert_matches!(mapping_iter.next(), None);
    }
648
    // A segment with filesz 0 and memsz 1 (pure BSS): the loader must not
    // issue any zero-length map calls for the missing file portion.
    #[test]
    fn segment_with_zero_file_size() {
        static PAGE_SIZE: LazyLock<usize> = LazyLock::new(|| zx::system_get_page_size() as usize);
        static ELF_PROGRAM_HEADER: LazyLock<elf_parse::Elf64ProgramHeader> =
            LazyLock::new(|| elf_parse::Elf64ProgramHeader {
                segment_type: elf_parse::SegmentType::Load as u32,
                flags: elf_parse::SegmentFlags::from_bits_truncate(
                    elf_parse::SegmentFlags::READ.bits() | elf_parse::SegmentFlags::WRITE.bits(),
                )
                .bits(),
                offset: *PAGE_SIZE,
                vaddr: 0x10000,
                paddr: 0x10000,
                filesz: 0,
                memsz: 1,
                align: *PAGE_SIZE as u64,
            });
        let headers = elf_parse::Elf64Headers::new_for_test(
            ELF_FILE_HEADER,
            Some(std::slice::from_ref(&ELF_PROGRAM_HEADER)),
        );
        let vmo = zx::Vmo::create(*PAGE_SIZE as u64 * 2).expect("create VMO");

        let mapper = TrackingMapper::new();
        map_elf_segments(&vmo, &headers, &mapper, 0, 0).expect("map ELF segments");
        // Every recorded mapping must cover at least one byte.
        for mapping in mapper.into_iter() {
            assert!(mapping.length != 0);
        }
    }
679
    // An execute-only segment (no READ/WRITE): the mapping must request
    // PERM_EXECUTE plus PERM_READ_IF_XOM_UNSUPPORTED (the read fallback for
    // hardware without execute-only support), and nothing else.
    #[test]
    fn map_execute_only_segment() {
        static PAGE_SIZE: LazyLock<usize> = LazyLock::new(|| zx::system_get_page_size() as usize);
        static ELF_PROGRAM_HEADER: LazyLock<elf_parse::Elf64ProgramHeader> =
            LazyLock::new(|| elf_parse::Elf64ProgramHeader {
                segment_type: elf_parse::SegmentType::Load as u32,
                flags: elf_parse::SegmentFlags::from_bits_truncate(
                    elf_parse::SegmentFlags::EXECUTE.bits(),
                )
                .bits(),
                offset: *PAGE_SIZE,
                vaddr: 0x10000,
                paddr: 0x10000,
                filesz: 0x10,
                memsz: 0x10,
                align: *PAGE_SIZE as u64,
            });
        let headers = elf_parse::Elf64Headers::new_for_test(
            ELF_FILE_HEADER,
            Some(std::slice::from_ref(&ELF_PROGRAM_HEADER)),
        );
        let vmo = zx::Vmo::create(*PAGE_SIZE as u64 * 2).expect("create VMO");

        let mapper = TrackingMapper::new();
        map_elf_segments(&vmo, &headers, &mapper, 0, 0).expect("map ELF segments");

        let mut mapping_iter = mapper.into_iter();
        let mapping = mapping_iter.next().expect("mapping from ELF VMO");
        assert_eq!(
            mapping.flags,
            zx::VmarFlags::SPECIFIC
                | zx::VmarFlags::ALLOW_FAULTS
                | zx::VmarFlags::PERM_EXECUTE
                | zx::VmarFlags::PERM_READ_IF_XOM_UNSUPPORTED
        );

        assert_matches!(mapping_iter.next(), None);
    }
719}