use crate::{elf_parse as elf, util};
use thiserror::Error;
use zx::{self as zx, AsHandleRef};

/// Possible errors that can occur during ELF loading.
#[derive(Error, Debug)]
pub enum ElfLoadError {
    #[error("ELF load segments were empty")]
    NothingToLoad,
    #[error("Failed to allocate VMAR for ELF: {}", _0)]
    VmarAllocate(zx::Status),
    #[error("Failed to map VMAR: {}", _0)]
    VmarMap(zx::Status),
    #[error("Failed to create CoW VMO clone: {}", _0)]
    VmoCowClone(zx::Status),
    #[error("Failed to create VMO: {}", _0)]
    VmoCreate(zx::Status),
    #[error("Failed to read from VMO: {}", _0)]
    VmoRead(zx::Status),
    #[error("Failed to write to VMO: {}", _0)]
    VmoWrite(zx::Status),
    #[error("Failed to get VMO name: {}", _0)]
    GetVmoName(zx::Status),
    #[error("Failed to set VMO name: {}", _0)]
    SetVmoName(zx::Status),
}

impl ElfLoadError {
    /// Returns the most appropriate zx::Status code for the given error.
    pub fn as_zx_status(&self) -> zx::Status {
        match self {
            ElfLoadError::NothingToLoad => zx::Status::NOT_FOUND,
            ElfLoadError::VmarAllocate(s)
            | ElfLoadError::VmarMap(s)
            | ElfLoadError::VmoCowClone(s)
            | ElfLoadError::VmoCreate(s)
            | ElfLoadError::VmoRead(s)
            | ElfLoadError::VmoWrite(s)
            | ElfLoadError::GetVmoName(s)
            | ElfLoadError::SetVmoName(s) => *s,
        }
    }
}

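/// Information on what an ELF requires of its address space.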
#[derive(Debug)]
pub struct LoadedElfInfo {
    /// The lowest address used by the ELF's LOAD segments, page-aligned down.
    pub low: usize,

    /// The highest address used by the ELF's LOAD segments, page-aligned up.
    pub high: usize,

    /// Union of the permissions of all LOAD segments.
    pub max_perm: elf::SegmentFlags,
}

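/// Returns the address space requirements to load this ELF. Attempting to load it into a VMAR
/// with insufficient space will fail.
///
/// For example, with 4 KiB pages, a single LOAD segment with `vaddr = 0x10010` and
/// `memsz = 0x100` produces `low = 0x10000` and `high = 0x11000`.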
pub fn loaded_elf_info(headers: &elf::Elf64Headers) -> LoadedElfInfo {
    let (mut first, mut low, mut high) = (true, 0, 0);
    let mut max_perm = elf::SegmentFlags::empty();
    for hdr in headers.program_headers_with_type(elf::SegmentType::Load) {
        // The ELF spec requires LOAD segments to be sorted by vaddr, so the first segment has
        // the lowest address.
        if first {
            low = util::page_start(hdr.vaddr);
            first = false;
        }
        high = util::page_end(hdr.vaddr + hdr.memsz as usize);
        max_perm |= hdr.flags();
    }
    LoadedElfInfo { low, high, max_perm }
}

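/// Return value of `load_elf`.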
#[derive(Debug)]
pub struct LoadedElf {
    /// The VMAR that the ELF file was loaded into.
    pub vmar: zx::Vmar,

    /// The virtual address of the VMAR.
    pub vmar_base: usize,

    /// The ELF entry point, adjusted for the load bias.
    pub entry: usize,
}

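/// A trait abstracting the `zx::Vmar::map` call, so that `map_elf_segments` can be driven
/// either by a real VMAR or by a test double (see `TrackingMapper` in the tests below, which
/// records mappings instead of performing them).
///
/// A minimal sketch of a custom implementation, assuming a hypothetical wrapper type
/// `BiasedMapper` that offsets every mapping by a fixed amount:
///
/// ```ignore
/// struct BiasedMapper {
///     vmar: zx::Vmar,
///     bias: usize,
/// }
///
/// impl Mapper for BiasedMapper {
///     fn map(
///         &self,
///         vmar_offset: usize,
///         vmo: &zx::Vmo,
///         vmo_offset: u64,
///         length: usize,
///         flags: zx::VmarFlags,
///     ) -> Result<usize, zx::Status> {
///         // Delegate to the real VMAR after applying the bias.
///         self.vmar.map(vmar_offset + self.bias, vmo, vmo_offset, length, flags)
///     }
/// }
/// ```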
pub trait Mapper {
    /// Maps `length` bytes of `vmo` starting at `vmo_offset` into the address space at
    /// `vmar_offset`, relative to the mapper's base, returning the address at which the
    /// mapping was placed.
    fn map(
        &self,
        vmar_offset: usize,
        vmo: &zx::Vmo,
        vmo_offset: u64,
        length: usize,
        flags: zx::VmarFlags,
    ) -> Result<usize, zx::Status>;
}

impl Mapper for zx::Vmar {
    fn map(
        &self,
        vmar_offset: usize,
        vmo: &zx::Vmo,
        vmo_offset: u64,
        length: usize,
        flags: zx::VmarFlags,
    ) -> Result<usize, zx::Status> {
        Self::map(self, vmar_offset, vmo, vmo_offset, length, flags)
    }
}

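/// Loads the ELF in `vmo` into a new child VMAR allocated from `root_vmar`, returning the
/// child VMAR, its base address, and the bias-adjusted entry point.
///
/// A minimal usage sketch. The `elf_parse::Elf64Headers::from_vmo` parsing call and
/// `fuchsia_runtime::vmar_root_self()` are assumptions about the surrounding environment, not
/// part of this module:
///
/// ```ignore
/// let headers = elf_parse::Elf64Headers::from_vmo(&elf_vmo)?;
/// let loaded = load_elf(&elf_vmo, &headers, &fuchsia_runtime::vmar_root_self())?;
/// println!("ELF mapped at {:#x}, entry {:#x}", loaded.vmar_base, loaded.entry);
/// ```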
pub fn load_elf(
    vmo: &zx::Vmo,
    headers: &elf::Elf64Headers,
    root_vmar: &zx::Vmar,
) -> Result<LoadedElf, ElfLoadError> {
    let info = loaded_elf_info(headers);
    let size = info.high - info.low;
    if size == 0 {
        return Err(ElfLoadError::NothingToLoad);
    }

    // Allocate a child VMAR large enough to hold all LOAD segments, with the CAN_MAP_* rights
    // needed for the maximum segment permissions. The kernel picks the base address.
    let flags = zx::VmarFlags::CAN_MAP_SPECIFIC | elf_to_vmar_can_map_flags(&info.max_perm);
    let (vmar, vmar_base) =
        root_vmar.allocate(0, size, flags).map_err(ElfLoadError::VmarAllocate)?;

    // The bias between the p_vaddr addresses in the ELF headers and the addresses the segments
    // are actually mapped at. wrapping_sub since the bias may be effectively negative.
    let vaddr_bias = vmar_base.wrapping_sub(info.low);

    map_elf_segments(vmo, headers, &vmar, vmar_base, vaddr_bias)?;
    Ok(LoadedElf { vmar, vmar_base, entry: headers.file_header().entry.wrapping_add(vaddr_bias) })
}

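/// Maps each LOAD segment of the ELF in `vmo` through `mapper`, biasing each segment's
/// `p_vaddr` by `vaddr_bias`. `mapper_base` is the base address that `mapper` offsets are
/// relative to, so each segment lands at `vaddr + vaddr_bias` in absolute terms. Read-only
/// segments are mapped directly from `vmo`; writable segments (and partial-page BSS) are
/// mapped from a copy-on-write child, and whole-page BSS from a fresh anonymous VMO.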
pub fn map_elf_segments(
    vmo: &zx::Vmo,
    headers: &elf::Elf64Headers,
    mapper: &dyn Mapper,
    mapper_base: usize,
    vaddr_bias: usize,
) -> Result<(), ElfLoadError> {
    // The bias as applied to mapper offsets, which are relative to mapper_base rather than
    // absolute addresses.
    let mapper_relative_bias = vaddr_bias.wrapping_sub(mapper_base);
    let vmo_name = vmo.get_name().map_err(ElfLoadError::GetVmoName)?;
    for hdr in headers.program_headers_with_type(elf::SegmentType::Load) {
        // Shift the segment down to the start of its first page, since mappings must be
        // page-aligned. The same adjustment applies to both the file and memory views.
        let adjust = util::page_offset(hdr.offset);
        let mut file_offset = hdr.offset - adjust;
        let file_size = hdr.filesz + adjust as u64;
        let virt_offset = hdr.vaddr - adjust;
        let virt_size = hdr.memsz + adjust as u64;

        // The mapper-relative address at which this segment is mapped.
        let virt_addr = virt_offset.wrapping_add(mapper_relative_bias);

        // If the segment has a BSS that begins partway through the last file page, that page
        // must be zeroed past file_size, which requires a writable VMO.
        let must_write = virt_size > file_size && util::page_offset(file_size as usize) != 0;

        // Writable segments are mapped from a copy-on-write child of the ELF VMO so that
        // writes don't modify the original; read-only segments are mapped directly.
        let vmo_to_map: &zx::Vmo;
        let writeable_vmo: zx::Vmo;
        if must_write || (file_size > 0 && hdr.flags().contains(elf::SegmentFlags::WRITE)) {
            writeable_vmo = vmo
                .create_child(
                    zx::VmoChildOptions::SNAPSHOT_AT_LEAST_ON_WRITE,
                    file_offset as u64,
                    util::page_end(file_size as usize) as u64,
                )
                .map_err(ElfLoadError::VmoCowClone)?;
            writeable_vmo
                .set_name(&vmo_name_with_prefix(&vmo_name, VMO_NAME_PREFIX_DATA))
                .map_err(ElfLoadError::SetVmoName)?;
            // The child VMO starts at the segment's first page, so the mapping offset is now 0.
            file_offset = 0;

            // Zero the remainder of the last file page if the BSS starts partway through it.
            if virt_size > file_size {
                let memset_size = util::page_end(file_size as usize) - file_size as usize;
                if memset_size > 0 {
                    writeable_vmo
                        .write(&vec![0u8; memset_size], file_size)
                        .map_err(ElfLoadError::VmoWrite)?;
                }
            }
            vmo_to_map = &writeable_vmo;
        } else {
            vmo_to_map = vmo;
        }

        // SPECIFIC pins the segment at its biased address; ALLOW_FAULTS permits mapping VMOs
        // that can fault, e.g. pager-backed executables.
        let flags = zx::VmarFlags::SPECIFIC
            | zx::VmarFlags::ALLOW_FAULTS
            | elf_to_vmar_perm_flags(&hdr.flags());
        if file_size != 0 {
            mapper
                .map(
                    virt_addr,
                    vmo_to_map,
                    file_offset as u64,
                    util::page_end(file_size as usize),
                    flags,
                )
                .map_err(ElfLoadError::VmarMap)?;
        }

        // Map any remaining whole pages of BSS from a fresh anonymous VMO, which the kernel
        // guarantees to be zero-filled.
        if virt_size > file_size {
            let bss_vmo_start = util::page_end(file_size as usize);
            let bss_vmo_size = util::page_end(virt_size as usize) - bss_vmo_start;
            if bss_vmo_size > 0 {
                let anon_vmo =
                    zx::Vmo::create(bss_vmo_size as u64).map_err(ElfLoadError::VmoCreate)?;
                anon_vmo
                    .set_name(&vmo_name_with_prefix(&vmo_name, VMO_NAME_PREFIX_BSS))
                    .map_err(ElfLoadError::SetVmoName)?;
                mapper
                    .map(virt_addr + bss_vmo_start, &anon_vmo, 0, bss_vmo_size, flags)
                    .map_err(ElfLoadError::VmarMap)?;
            }
        }
    }
    Ok(())
}

// Prefixes prepended to the original ELF VMO's name when naming the VMOs created for writable
// data segments and for BSS.
const VMO_NAME_PREFIX_BSS: &str = "bss:";
const VMO_NAME_PREFIX_DATA: &str = "data:";

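/// Prepends `prefix` to `name`, truncating the result to fit in `ZX_MAX_NAME_LEN - 1` bytes
/// (`zx::Name::new_lossy` performs the truncation). An empty `name` becomes `"<unknown ELF>"`
/// so the prefix never appears alone.
///
/// Behavior as exercised by the tests below:
///
/// ```ignore
/// let name = zx::Name::new("short_vmo_name").unwrap();
/// assert_eq!(vmo_name_with_prefix(&name, VMO_NAME_PREFIX_BSS), "bss:short_vmo_name");
/// ```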
fn vmo_name_with_prefix(name: &zx::Name, prefix: &str) -> zx::Name {
    // The prefix alone must fit within a VMO name.
    assert!(prefix.len() <= zx::sys::ZX_MAX_NAME_LEN - 1);
    if name.is_empty() {
        zx::Name::new_lossy(&format!("{prefix}<unknown ELF>"))
    } else {
        zx::Name::new_lossy(&format!("{prefix}{name}"))
    }
}

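/// Converts ELF segment permission flags into the `CAN_MAP_*` rights needed on a VMAR that
/// will hold mappings with those permissions. EXECUTE also implies CAN_MAP_READ, since
/// executable mappings fall back to readable on architectures without execute-only memory.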
fn elf_to_vmar_can_map_flags(elf_flags: &elf::SegmentFlags) -> zx::VmarFlags {
    let mut flags = zx::VmarFlags::empty();
    if elf_flags.contains(elf::SegmentFlags::READ) {
        flags |= zx::VmarFlags::CAN_MAP_READ;
    }
    if elf_flags.contains(elf::SegmentFlags::WRITE) {
        flags |= zx::VmarFlags::CAN_MAP_WRITE;
    }
    if elf_flags.contains(elf::SegmentFlags::EXECUTE) {
        flags |= zx::VmarFlags::CAN_MAP_EXECUTE | zx::VmarFlags::CAN_MAP_READ;
    }
    flags
}

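/// Converts ELF segment permission flags into the mapping permissions passed to `Mapper::map`.
/// EXECUTE becomes PERM_EXECUTE plus PERM_READ_IF_XOM_UNSUPPORTED, so execute-only segments
/// degrade to readable-and-executable where execute-only memory is unavailable.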
fn elf_to_vmar_perm_flags(elf_flags: &elf::SegmentFlags) -> zx::VmarFlags {
    let mut flags = zx::VmarFlags::empty();
    if elf_flags.contains(elf::SegmentFlags::READ) {
        flags |= zx::VmarFlags::PERM_READ;
    }
    if elf_flags.contains(elf::SegmentFlags::WRITE) {
        flags |= zx::VmarFlags::PERM_WRITE;
    }
    if elf_flags.contains(elf::SegmentFlags::EXECUTE) {
        flags |= zx::VmarFlags::PERM_EXECUTE | zx::VmarFlags::PERM_READ_IF_XOM_UNSUPPORTED;
    }
    flags
}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::elf_parse;
    use assert_matches::assert_matches;
    use fidl::HandleBased;
    use std::cell::RefCell;
    use std::mem::size_of;
    use std::sync::LazyLock;

    #[test]
    fn test_vmo_name_with_prefix() {
        let empty_vmo_name = zx::Name::default();
        let short_vmo_name = zx::Name::new("short_vmo_name").unwrap();
        let max_vmo_name = zx::Name::new("a_great_maximum_length_vmo_name").unwrap();

        assert_eq!(vmo_name_with_prefix(&empty_vmo_name, VMO_NAME_PREFIX_BSS), "bss:<unknown ELF>");
        assert_eq!(
            vmo_name_with_prefix(&short_vmo_name, VMO_NAME_PREFIX_BSS),
            "bss:short_vmo_name",
        );
        // Names longer than ZX_MAX_NAME_LEN - 1 bytes are truncated.
        assert_eq!(
            vmo_name_with_prefix(&max_vmo_name, VMO_NAME_PREFIX_BSS),
            "bss:a_great_maximum_length_vmo_",
        );
        assert_eq!(
            vmo_name_with_prefix(&max_vmo_name, VMO_NAME_PREFIX_DATA),
            "data:a_great_maximum_length_vmo",
        );

        // Long prefixes are allowed, as long as the prefix itself fits in a name.
        assert_eq!(
            vmo_name_with_prefix(&empty_vmo_name, "a_long_vmo_name_prefix:"),
            "a_long_vmo_name_prefix:<unknown",
        );
        assert_eq!(
            vmo_name_with_prefix(&empty_vmo_name, "a_great_maximum_length_vmo_name"),
            max_vmo_name,
        );
        assert_eq!(
            vmo_name_with_prefix(&max_vmo_name, "anystringhere"),
            "anystringherea_great_maximum_le",
        );
    }

    /// A single `Mapper::map` call, as recorded by `TrackingMapper`.
    #[derive(Debug)]
    struct RecordedMapping {
        vmo: zx::Vmo,
        vmo_offset: u64,
        length: usize,
        flags: zx::VmarFlags,
    }

    /// A `Mapper` that records the mappings it is asked to perform instead of touching a real
    /// VMAR, so tests can inspect exactly what `map_elf_segments` would map.
    struct TrackingMapper(RefCell<Vec<RecordedMapping>>);

    impl TrackingMapper {
        fn new() -> Self {
            Self(RefCell::new(Vec::new()))
        }
    }

    impl IntoIterator for TrackingMapper {
        type Item = RecordedMapping;
        type IntoIter = std::vec::IntoIter<Self::Item>;

        fn into_iter(self) -> Self::IntoIter {
            self.0.into_inner().into_iter()
        }
    }

    impl Mapper for TrackingMapper {
        fn map(
            &self,
            vmar_offset: usize,
            vmo: &zx::Vmo,
            vmo_offset: u64,
            length: usize,
            flags: zx::VmarFlags,
        ) -> Result<usize, zx::Status> {
            self.0.borrow_mut().push(RecordedMapping {
                vmo: vmo.as_handle_ref().duplicate(zx::Rights::SAME_RIGHTS).unwrap().into(),
                vmo_offset,
                length,
                flags,
            });
            Ok(vmar_offset)
        }
    }

    /// A minimal ELF file header with a single program header, shared by the tests below.
    const ELF_FILE_HEADER: &elf_parse::Elf64FileHeader = &elf_parse::Elf64FileHeader {
        ident: elf_parse::ElfIdent {
            magic: elf_parse::ELF_MAGIC,
            class: elf_parse::ElfClass::Elf64 as u8,
            data: elf_parse::NATIVE_ENCODING as u8,
            version: elf_parse::ElfVersion::Current as u8,
            osabi: 0x00,
            abiversion: 0x00,
            pad: [0; 7],
        },
        elf_type: elf_parse::ElfType::SharedObject as u16,
        machine: elf_parse::CURRENT_ARCH as u16,
        version: elf_parse::ElfVersion::Current as u32,
        entry: 0x10000,
        phoff: size_of::<elf_parse::Elf64FileHeader>(),
        shoff: 0,
        flags: 0,
        ehsize: size_of::<elf_parse::Elf64FileHeader>() as u16,
        phentsize: size_of::<elf_parse::Elf64ProgramHeader>() as u16,
        phnum: 1,
        shentsize: 0,
        shnum: 0,
        shstrndx: 0,
    };

    /// The rights on a freshly created VMO handle that these tests care about; WRITE is
    /// subtracted below to simulate mapping a read-only ELF VMO.
    const VMO_DEFAULT_RIGHTS: zx::Rights = zx::Rights::from_bits_truncate(
        zx::Rights::DUPLICATE.bits()
            | zx::Rights::TRANSFER.bits()
            | zx::Rights::READ.bits()
            | zx::Rights::WRITE.bits()
            | zx::Rights::MAP.bits()
            | zx::Rights::GET_PROPERTY.bits()
            | zx::Rights::SET_PROPERTY.bits(),
    );

    #[test]
    fn map_read_only_with_page_unaligned_bss() {
        const ELF_DATA: &[u8; 8] = b"FUCHSIA!";

        static PAGE_SIZE: LazyLock<usize> = LazyLock::new(|| zx::system_get_page_size() as usize);
        // A read-only segment whose BSS (memsz > filesz) begins partway through a page.
        static ELF_PROGRAM_HEADER: LazyLock<elf_parse::Elf64ProgramHeader> =
            LazyLock::new(|| elf_parse::Elf64ProgramHeader {
                segment_type: elf_parse::SegmentType::Load as u32,
                flags: elf_parse::SegmentFlags::from_bits_truncate(
                    elf_parse::SegmentFlags::READ.bits() | elf_parse::SegmentFlags::EXECUTE.bits(),
                )
                .bits(),
                offset: *PAGE_SIZE,
                vaddr: 0x10000,
                paddr: 0x10000,
                filesz: ELF_DATA.len() as u64,
                memsz: 0x100,
                align: *PAGE_SIZE as u64,
            });
        let headers = elf_parse::Elf64Headers::new_for_test(
            ELF_FILE_HEADER,
            Some(std::slice::from_ref(&ELF_PROGRAM_HEADER)),
        );
        let vmo = zx::Vmo::create(*PAGE_SIZE as u64 * 2).expect("create VMO");

        // Fill the VMO with 0xff so that zeroing past the segment data is observable.
        let data = vec![0xff; *PAGE_SIZE * 2];
        vmo.write(&data, 0).expect("fill VMO with 0xff");
        vmo.write(ELF_DATA, *PAGE_SIZE as u64).expect("write data to VMO");

        // Remove the WRITE right to prove a COW child is created rather than writing through.
        let vmo =
            vmo.replace_handle(VMO_DEFAULT_RIGHTS - zx::Rights::WRITE).expect("remove WRITE right");

        let mapper = TrackingMapper::new();
        map_elf_segments(&vmo, &headers, &mapper, 0, 0).expect("map ELF segments");

        let mut mapping_iter = mapper.into_iter();

        let mapping = mapping_iter.next().expect("mapping from ELF VMO");

        let mut data = vec![0; *PAGE_SIZE];
        mapping.vmo.read(&mut data, mapping.vmo_offset).expect("read VMO");

        // The mapped page should contain the segment data followed by zeros, not 0xff.
        let expected = ELF_DATA
            .iter()
            .copied()
            .chain(std::iter::repeat(0).take(*PAGE_SIZE - ELF_DATA.len()))
            .collect::<Vec<u8>>();

        assert_eq!(&expected, &data);

        // No separate BSS mapping is expected, since memsz fits within the single mapped page.
        assert_matches!(mapping_iter.next(), None);
    }

    #[test]
    fn map_read_only_vmo_with_page_aligned_bss() {
        static PAGE_SIZE: LazyLock<usize> = LazyLock::new(|| zx::system_get_page_size() as usize);
        // A read-only segment with exactly one whole page of BSS after the file contents.
        static ELF_PROGRAM_HEADER: LazyLock<elf_parse::Elf64ProgramHeader> =
            LazyLock::new(|| elf_parse::Elf64ProgramHeader {
                segment_type: elf_parse::SegmentType::Load as u32,
                flags: elf_parse::SegmentFlags::from_bits_truncate(
                    elf_parse::SegmentFlags::READ.bits() | elf_parse::SegmentFlags::EXECUTE.bits(),
                )
                .bits(),
                offset: *PAGE_SIZE,
                vaddr: 0x10000,
                paddr: 0x10000,
                filesz: *PAGE_SIZE as u64,
                memsz: *PAGE_SIZE as u64 * 2,
                align: *PAGE_SIZE as u64,
            });
        let headers = elf_parse::Elf64Headers::new_for_test(
            ELF_FILE_HEADER,
            Some(std::slice::from_ref(&ELF_PROGRAM_HEADER)),
        );
        let vmo = zx::Vmo::create(*PAGE_SIZE as u64 * 2).expect("create VMO");
        let pattern = vec![0xff; *PAGE_SIZE * 2];
        vmo.write(&pattern, 0).expect("fill VMO with 0xff");

        // Page-aligned BSS requires no zeroing of the file pages, so the read-only VMO can be
        // mapped directly.
        let vmo =
            vmo.replace_handle(VMO_DEFAULT_RIGHTS - zx::Rights::WRITE).expect("remove WRITE right");

        let mapper = TrackingMapper::new();
        map_elf_segments(&vmo, &headers, &mapper, 0, 0).expect("map ELF segments");

        let mut mapping_iter = mapper.into_iter();

        // The file pages are mapped from the original ELF VMO, not a COW child.
        let mapping = mapping_iter.next().expect("mapping from ELF VMO");
        assert_eq!(mapping.vmo.get_koid().unwrap(), vmo.get_koid().unwrap());

        let mut data = vec![0; *PAGE_SIZE];

        mapping.vmo.read(&mut data, mapping.vmo_offset).expect("read ELF VMO");
        assert_eq!(&data, &pattern[0..*PAGE_SIZE]);

        // The BSS page comes from a separate anonymous VMO and reads as zeros.
        let mapping = mapping_iter.next().expect("mapping from BSS VMO");

        mapping.vmo.read(&mut data, mapping.vmo_offset).expect("read BSS VMO");
        let zero = vec![0; *PAGE_SIZE];
        assert_eq!(&data, &zero);

        assert_matches!(mapping_iter.next(), None);
    }

    #[test]
    fn map_read_only_vmo_with_no_bss() {
        static PAGE_SIZE: LazyLock<usize> = LazyLock::new(|| zx::system_get_page_size() as usize);
        // A read-only segment with no BSS at all (memsz == filesz).
        static ELF_PROGRAM_HEADER: LazyLock<elf_parse::Elf64ProgramHeader> =
            LazyLock::new(|| elf_parse::Elf64ProgramHeader {
                segment_type: elf_parse::SegmentType::Load as u32,
                flags: elf_parse::SegmentFlags::from_bits_truncate(
                    elf_parse::SegmentFlags::READ.bits() | elf_parse::SegmentFlags::EXECUTE.bits(),
                )
                .bits(),
                offset: *PAGE_SIZE,
                vaddr: 0x10000,
                paddr: 0x10000,
                filesz: *PAGE_SIZE as u64,
                memsz: *PAGE_SIZE as u64,
                align: *PAGE_SIZE as u64,
            });
        let headers = elf_parse::Elf64Headers::new_for_test(
            ELF_FILE_HEADER,
            Some(std::slice::from_ref(&ELF_PROGRAM_HEADER)),
        );
        let vmo = zx::Vmo::create(*PAGE_SIZE as u64 * 2).expect("create VMO");
        let pattern = vec![0xff; *PAGE_SIZE * 2];
        vmo.write(&pattern, 0).expect("fill VMO with 0xff");

        let vmo =
            vmo.replace_handle(VMO_DEFAULT_RIGHTS - zx::Rights::WRITE).expect("remove WRITE right");

        let mapper = TrackingMapper::new();
        map_elf_segments(&vmo, &headers, &mapper, 0, 0).expect("map ELF segments");

        let mut mapping_iter = mapper.into_iter();

        // The segment is mapped directly from the original ELF VMO.
        let mapping = mapping_iter.next().expect("mapping from ELF VMO");
        assert_eq!(mapping.vmo.get_koid().unwrap(), vmo.get_koid().unwrap());

        let mut data = vec![0; *PAGE_SIZE];

        mapping.vmo.read(&mut data, mapping.vmo_offset).expect("read ELF VMO");
        assert_eq!(&data, &pattern[0..*PAGE_SIZE]);

        assert_matches!(mapping_iter.next(), None);
    }

    #[test]
    fn map_read_only_vmo_with_write_flag() {
        static PAGE_SIZE: LazyLock<usize> = LazyLock::new(|| zx::system_get_page_size() as usize);
        // A writable segment, which must be mapped from a COW child even with no BSS.
        static ELF_PROGRAM_HEADER: LazyLock<elf_parse::Elf64ProgramHeader> =
            LazyLock::new(|| elf_parse::Elf64ProgramHeader {
                segment_type: elf_parse::SegmentType::Load as u32,
                flags: elf_parse::SegmentFlags::from_bits_truncate(
                    elf_parse::SegmentFlags::READ.bits() | elf_parse::SegmentFlags::WRITE.bits(),
                )
                .bits(),
                offset: *PAGE_SIZE,
                vaddr: 0x10000,
                paddr: 0x10000,
                filesz: *PAGE_SIZE as u64,
                memsz: *PAGE_SIZE as u64,
                align: *PAGE_SIZE as u64,
            });
        let headers = elf_parse::Elf64Headers::new_for_test(
            ELF_FILE_HEADER,
            Some(std::slice::from_ref(&ELF_PROGRAM_HEADER)),
        );
        let vmo = zx::Vmo::create(*PAGE_SIZE as u64 * 2).expect("create VMO");

        let vmo =
            vmo.replace_handle(VMO_DEFAULT_RIGHTS - zx::Rights::WRITE).expect("remove WRITE right");

        let mapper = TrackingMapper::new();
        map_elf_segments(&vmo, &headers, &mapper, 0, 0).expect("map ELF segments");

        let mut mapping_iter = mapper.into_iter();

        // The mapped VMO must be a COW child, not the original read-only VMO.
        let mapping = mapping_iter.next().expect("mapping from ELF VMO");
        assert_ne!(mapping.vmo.get_koid().unwrap(), vmo.get_koid().unwrap());

        // The COW child is writable even though the original VMO is not.
        mapping.vmo.write(b"FUCHSIA!", mapping.vmo_offset).expect("write to COW VMO");

        assert_matches!(mapping_iter.next(), None);
    }

    #[test]
    fn segment_with_zero_file_size() {
        static PAGE_SIZE: LazyLock<usize> = LazyLock::new(|| zx::system_get_page_size() as usize);
        // A segment with filesz == 0 must not produce a zero-length file mapping.
        static ELF_PROGRAM_HEADER: LazyLock<elf_parse::Elf64ProgramHeader> =
            LazyLock::new(|| elf_parse::Elf64ProgramHeader {
                segment_type: elf_parse::SegmentType::Load as u32,
                flags: elf_parse::SegmentFlags::from_bits_truncate(
                    elf_parse::SegmentFlags::READ.bits() | elf_parse::SegmentFlags::WRITE.bits(),
                )
                .bits(),
                offset: *PAGE_SIZE,
                vaddr: 0x10000,
                paddr: 0x10000,
                filesz: 0,
                memsz: 1,
                align: *PAGE_SIZE as u64,
            });
        let headers = elf_parse::Elf64Headers::new_for_test(
            ELF_FILE_HEADER,
            Some(std::slice::from_ref(&ELF_PROGRAM_HEADER)),
        );
        let vmo = zx::Vmo::create(*PAGE_SIZE as u64 * 2).expect("create VMO");

        let mapper = TrackingMapper::new();
        map_elf_segments(&vmo, &headers, &mapper, 0, 0).expect("map ELF segments");
        for mapping in mapper.into_iter() {
            assert_ne!(mapping.length, 0);
        }
    }

    #[test]
    fn map_execute_only_segment() {
        static PAGE_SIZE: LazyLock<usize> = LazyLock::new(|| zx::system_get_page_size() as usize);
        static ELF_PROGRAM_HEADER: LazyLock<elf_parse::Elf64ProgramHeader> =
            LazyLock::new(|| elf_parse::Elf64ProgramHeader {
                segment_type: elf_parse::SegmentType::Load as u32,
                flags: elf_parse::SegmentFlags::from_bits_truncate(
                    elf_parse::SegmentFlags::EXECUTE.bits(),
                )
                .bits(),
                offset: *PAGE_SIZE,
                vaddr: 0x10000,
                paddr: 0x10000,
                filesz: 0x10,
                memsz: 0x10,
                align: *PAGE_SIZE as u64,
            });
        let headers = elf_parse::Elf64Headers::new_for_test(
            ELF_FILE_HEADER,
            Some(std::slice::from_ref(&ELF_PROGRAM_HEADER)),
        );
        let vmo = zx::Vmo::create(*PAGE_SIZE as u64 * 2).expect("create VMO");

        let mapper = TrackingMapper::new();
        map_elf_segments(&vmo, &headers, &mapper, 0, 0).expect("map ELF segments");

        let mut mapping_iter = mapper.into_iter();
        let mapping = mapping_iter.next().expect("mapping from ELF VMO");
        // Execute-only segments are mapped with PERM_READ_IF_XOM_UNSUPPORTED so they degrade
        // to readable on architectures without execute-only memory.
        assert_eq!(
            mapping.flags,
            zx::VmarFlags::SPECIFIC
                | zx::VmarFlags::ALLOW_FAULTS
                | zx::VmarFlags::PERM_EXECUTE
                | zx::VmarFlags::PERM_READ_IF_XOM_UNSUPPORTED
        );

        assert_matches!(mapping_iter.next(), None);
    }
}