use crate::{elf_parse as elf, util};
use thiserror::Error;
use zx::{self as zx, AsHandleRef};

/// Possible errors that can occur during ELF loading.
#[derive(Error, Debug)]
pub enum ElfLoadError {
    #[error("ELF load segments were empty")]
    NothingToLoad,
    #[error("Failed to allocate VMAR for ELF: {}", _0)]
    VmarAllocate(zx::Status),
    #[error("Failed to map VMAR: {}", _0)]
    VmarMap(zx::Status),
    #[error("Failed to create CoW VMO clone: {}", _0)]
    VmoCowClone(zx::Status),
    #[error("Failed to create VMO: {}", _0)]
    VmoCreate(zx::Status),
    #[error("Failed to read from VMO: {}", _0)]
    VmoRead(zx::Status),
    #[error("Failed to write to VMO: {}", _0)]
    VmoWrite(zx::Status),
    #[error("Failed to get VMO name: {}", _0)]
    GetVmoName(zx::Status),
    #[error("Failed to set VMO name: {}", _0)]
    SetVmoName(zx::Status),
}

impl ElfLoadError {
    /// Returns the zx::Status that most closely matches this error.
    pub fn as_zx_status(&self) -> zx::Status {
        match self {
            ElfLoadError::NothingToLoad => zx::Status::NOT_FOUND,
            ElfLoadError::VmarAllocate(s)
            | ElfLoadError::VmarMap(s)
            | ElfLoadError::VmoCowClone(s)
            | ElfLoadError::VmoCreate(s)
            | ElfLoadError::VmoRead(s)
            | ElfLoadError::VmoWrite(s)
            | ElfLoadError::GetVmoName(s)
            | ElfLoadError::SetVmoName(s) => *s,
        }
    }
}

/// Information on what an ELF requires of its address space.
#[derive(Debug)]
pub struct LoadedElfInfo {
    /// The lowest address used by the ELF's load segments, rounded down to a page boundary.
    pub low: usize,

    /// The highest address used by the ELF's load segments, rounded up to a page boundary.
    pub high: usize,

    /// The union of the access permissions required by the ELF's load segments.
    pub max_perm: elf::SegmentFlags,
}

/// Returns the address space requirements to load this ELF. Attempting to map it with
/// permissions less than max_perm, or at a base such that the range [base + low, base + high)
/// is not available, will fail.
pub fn loaded_elf_info(headers: &elf::Elf64Headers) -> LoadedElfInfo {
    let (mut first, mut low, mut high) = (true, 0, 0);
    let mut max_perm = elf::SegmentFlags::empty();
    for hdr in headers.program_headers_with_type(elf::SegmentType::Load) {
        if first {
            // Load segments are sorted by vaddr, so the first one has the lowest.
            low = util::page_start(hdr.vaddr);
            first = false;
        }
        high = util::page_end(hdr.vaddr + hdr.memsz as usize);
        max_perm |= hdr.flags();
    }
    LoadedElfInfo { low, high, max_perm }
}
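
// For example (illustrative numbers, assuming 4 KiB pages): an ELF with a read-execute
// segment at vaddr 0x10050 with memsz 0x100 and a read-write segment at vaddr 0x12000 with
// memsz 0x1010 yields low = 0x10000, high = 0x14000 (0x13010 rounded up to a page), and
// max_perm = READ | WRITE | EXECUTE.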

/// Return value of [`load_elf`].
#[derive(Debug)]
pub struct LoadedElf {
    /// The VMAR that the ELF file was loaded into.
    pub vmar: zx::Vmar,

    /// The virtual address of the VMAR's base.
    pub vmar_base: usize,

    /// The ELF entry point, adjusted for the load bias.
    pub entry: usize,
}

/// A trait so that callers of [`map_elf_segments`] can provide their own mapping behavior.
pub trait Mapper {
    /// Maps `length` bytes of `vmo`, starting at `vmo_offset`, at `vmar_offset` relative to
    /// the mapper's base, with the given `flags`. Returns the address of the new mapping.
    fn map(
        &self,
        vmar_offset: usize,
        vmo: &zx::Vmo,
        vmo_offset: u64,
        length: usize,
        flags: zx::VmarFlags,
    ) -> Result<usize, zx::Status>;
}

impl Mapper for zx::Vmar {
    fn map(
        &self,
        vmar_offset: usize,
        vmo: &zx::Vmo,
        vmo_offset: u64,
        length: usize,
        flags: zx::VmarFlags,
    ) -> Result<usize, zx::Status> {
        // Delegates to the inherent zx::Vmar::map, which takes precedence in name resolution.
        Self::map(self, vmar_offset, vmo, vmo_offset, length, flags)
    }
}
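
// Mapper exists so callers can interpose on mappings. As an illustrative sketch (hypothetical
// type, not defined in this crate), a wrapper could count mappings while delegating to a VMAR:
//
//     struct CountingMapper(zx::Vmar, std::cell::Cell<usize>);
//     impl Mapper for CountingMapper {
//         fn map(
//             &self,
//             vmar_offset: usize,
//             vmo: &zx::Vmo,
//             vmo_offset: u64,
//             length: usize,
//             flags: zx::VmarFlags,
//         ) -> Result<usize, zx::Status> {
//             self.1.set(self.1.get() + 1);
//             self.0.map(vmar_offset, vmo, vmo_offset, length, flags)
//         }
//     }
//
// The TrackingMapper in the tests below takes the same approach, recording each request
// instead of performing a real mapping.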

/// Loads an ELF into a new sub-VMAR of the given root VMAR.
pub fn load_elf(
    vmo: &zx::Vmo,
    headers: &elf::Elf64Headers,
    root_vmar: &zx::Vmar,
) -> Result<LoadedElf, ElfLoadError> {
    let info = loaded_elf_info(headers);
    let size = info.high - info.low;
    if size == 0 {
        return Err(ElfLoadError::NothingToLoad);
    }

    let flags = zx::VmarFlags::CAN_MAP_SPECIFIC | elf_to_vmar_can_map_flags(&info.max_perm);
    // Allocate a sub-VMAR sized to fit all the load segments, with the union of the
    // permissions they need; the kernel picks the base address.
    let (vmar, vmar_base) =
        root_vmar.allocate(0, size, flags).map_err(ElfLoadError::VmarAllocate)?;

    // The bias to add to the ELF's virtual addresses to produce the address they are actually
    // mapped at. Wrapping is intentional, since the bias may be logically negative.
    let vaddr_bias = vmar_base.wrapping_sub(info.low);

    map_elf_segments(vmo, headers, &vmar, vmar_base, vaddr_bias)?;
    Ok(LoadedElf { vmar, vmar_base, entry: headers.file_header().entry.wrapping_add(vaddr_bias) })
}
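
// Illustrative use (hypothetical caller; assumes `elf_parse::Elf64Headers::from_vmo` as the
// parser entry point and `fuchsia_runtime::vmar_root_self` for the root VMAR):
//
//     let headers = elf_parse::Elf64Headers::from_vmo(&vmo)?;
//     let loaded = load_elf(&vmo, &headers, &fuchsia_runtime::vmar_root_self())?;
//     // `loaded.entry` is the biased entry point; `loaded.vmar` keeps the mappings alive.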

/// Maps the segments of an ELF into an existing VMAR, using the given [`Mapper`].
pub fn map_elf_segments(
    vmo: &zx::Vmo,
    headers: &elf::Elf64Headers,
    mapper: &dyn Mapper,
    mapper_base: usize,
    vaddr_bias: usize,
) -> Result<(), ElfLoadError> {
    // The mapper operates on offsets relative to its base, while vaddr_bias is relative to
    // address zero, so fold the base into the bias. Wrapping subtraction is intentional: the
    // result may be logically negative, and the wrapping addition below cancels it out so the
    // final address always lands back inside the mapper's range.
    let mapper_relative_bias = vaddr_bias.wrapping_sub(mapper_base);
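    // For example, on a 64-bit target: if the ELF's lowest vaddr is 0x10000 and the kernel
    // placed the VMAR at 0x7344_0000, then vaddr_bias is 0x7343_0000 and, with mapper_base
    // equal to the VMAR base, mapper_relative_bias wraps to 0xffff_ffff_ffff_0000. The
    // wrapping_add below then turns a segment vaddr of 0x10000 into mapper offset 0, the
    // start of the VMAR.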
    let vmo_name = vmo.get_name().map_err(ElfLoadError::GetVmoName)?;
    for hdr in headers.program_headers_with_type(elf::SegmentType::Load) {
        // Segments are not necessarily page aligned, but mappings must be: round the file
        // offset and virtual address down to the containing page boundary and grow the sizes
        // by the same amount.
        let adjust = util::page_offset(hdr.offset);
        let mut file_offset = hdr.offset - adjust;
        let file_size = hdr.filesz + adjust as u64;
        let virt_offset = hdr.vaddr - adjust;
        let virt_size = hdr.memsz + adjust as u64;

        // Wrapping addition, to match the wrapping subtraction used to compute the bias above.
        let virt_addr = virt_offset.wrapping_add(mapper_relative_bias);

        // If the BSS starts partway through a page of file data, the tail of that page must
        // be zeroed, which requires a writable VMO.
        let must_write = virt_size > file_size && util::page_offset(file_size as usize) != 0;

        let vmo_to_map: &zx::Vmo;
        // Writable segments are mapped from a copy-on-write child of the ELF VMO so the
        // original is never modified. Declared here so the child outlives `vmo_to_map`'s
        // borrow of it.
        let writeable_vmo: zx::Vmo;
        if must_write || (file_size > 0 && hdr.flags().contains(elf::SegmentFlags::WRITE)) {
            writeable_vmo = vmo
                .create_child(
                    zx::VmoChildOptions::SNAPSHOT_AT_LEAST_ON_WRITE,
                    file_offset as u64,
                    util::page_end(file_size as usize) as u64,
                )
                .map_err(ElfLoadError::VmoCowClone)?;
            writeable_vmo
                .set_name(&vmo_name_with_prefix(&vmo_name, VMO_NAME_PREFIX_DATA))
                .map_err(ElfLoadError::SetVmoName)?;
            // The child VMO starts at file_offset within the parent, so offsets into it are
            // now relative to zero.
            file_offset = 0;

            if virt_size > file_size {
                // Zero the remainder of the final page of file data, where the BSS begins.
                let memset_size = util::page_end(file_size as usize) - file_size as usize;
                if memset_size > 0 {
                    writeable_vmo
                        .write(&vec![0u8; memset_size], file_size)
                        .map_err(ElfLoadError::VmoWrite)?;
                }
            }
            vmo_to_map = &writeable_vmo;
        } else {
            vmo_to_map = vmo;
        }

        // ALLOW_FAULTS is needed to map VMOs that can fault, e.g. resizable or pager-backed
        // VMOs such as those backed by a filesystem.
        let flags = zx::VmarFlags::SPECIFIC
            | zx::VmarFlags::ALLOW_FAULTS
            | elf_to_vmar_perm_flags(&hdr.flags());
        if file_size != 0 {
            mapper
                .map(
                    virt_addr,
                    vmo_to_map,
                    file_offset as u64,
                    util::page_end(file_size as usize),
                    flags,
                )
                .map_err(ElfLoadError::VmarMap)?;
        }

        if virt_size > file_size {
            // Any BSS beyond the final page of file data is mapped from a fresh anonymous,
            // zero-filled VMO.
            let bss_vmo_start = util::page_end(file_size as usize);
            let bss_vmo_size = util::page_end(virt_size as usize) - bss_vmo_start;
            if bss_vmo_size > 0 {
                let anon_vmo =
                    zx::Vmo::create(bss_vmo_size as u64).map_err(ElfLoadError::VmoCreate)?;
                anon_vmo
                    .set_name(&vmo_name_with_prefix(&vmo_name, VMO_NAME_PREFIX_BSS))
                    .map_err(ElfLoadError::SetVmoName)?;
                mapper
                    .map(virt_addr + bss_vmo_start, &anon_vmo, 0, bss_vmo_size, flags)
                    .map_err(ElfLoadError::VmarMap)?;
            }
        }
    }
    Ok(())
}

// Prefixes prepended to the ELF VMO's name when naming the COW data VMOs and anonymous BSS
// VMOs created while mapping segments.
const VMO_NAME_PREFIX_BSS: &str = "bss:";
const VMO_NAME_PREFIX_DATA: &str = "data:";

/// Prepends a prefix to a VMO name, truncating the result if it is too long.
fn vmo_name_with_prefix(name: &zx::Name, prefix: &str) -> zx::Name {
    // The prefix alone must fit in a VMO name (ZX_MAX_NAME_LEN includes the nul terminator).
    assert!(prefix.len() <= zx::sys::ZX_MAX_NAME_LEN - 1);
    if name.is_empty() {
        zx::Name::new_lossy(&format!("{prefix}<unknown ELF>"))
    } else {
        zx::Name::new_lossy(&format!("{prefix}{name}"))
    }
}
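
// For example, a VMO named "libfoo.so" yields "data:libfoo.so", while an unnamed VMO yields
// "data:<unknown ELF>". Results longer than ZX_MAX_NAME_LEN - 1 bytes are truncated by
// Name::new_lossy, as exercised by test_vmo_name_with_prefix below.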

/// Translates ELF segment permission flags into the VMAR CAN_MAP_* flags needed to allocate
/// a VMAR that such segments can be mapped into.
fn elf_to_vmar_can_map_flags(elf_flags: &elf::SegmentFlags) -> zx::VmarFlags {
    let mut flags = zx::VmarFlags::empty();
    if elf_flags.contains(elf::SegmentFlags::READ) {
        flags |= zx::VmarFlags::CAN_MAP_READ;
    }
    if elf_flags.contains(elf::SegmentFlags::WRITE) {
        flags |= zx::VmarFlags::CAN_MAP_WRITE;
    }
    if elf_flags.contains(elf::SegmentFlags::EXECUTE) {
        // Executable segments also need CAN_MAP_READ, in case execute-only is unsupported.
        flags |= zx::VmarFlags::CAN_MAP_EXECUTE | zx::VmarFlags::CAN_MAP_READ;
    }
    flags
}

/// Translates ELF segment permission flags into the VMAR PERM_* flags used to map a segment.
fn elf_to_vmar_perm_flags(elf_flags: &elf::SegmentFlags) -> zx::VmarFlags {
    let mut flags = zx::VmarFlags::empty();
    if elf_flags.contains(elf::SegmentFlags::READ) {
        flags |= zx::VmarFlags::PERM_READ;
    }
    if elf_flags.contains(elf::SegmentFlags::WRITE) {
        flags |= zx::VmarFlags::PERM_WRITE;
    }
    if elf_flags.contains(elf::SegmentFlags::EXECUTE) {
        flags |= zx::VmarFlags::PERM_EXECUTE | zx::VmarFlags::PERM_READ_IF_XOM_UNSUPPORTED;
    }
    flags
}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::elf_parse;
    use assert_matches::assert_matches;
    use fidl::HandleBased;
    use lazy_static::lazy_static;
    use std::cell::RefCell;
    use std::mem::size_of;

    #[test]
    fn test_vmo_name_with_prefix() {
        let empty_vmo_name = zx::Name::default();
        let short_vmo_name = zx::Name::new("short_vmo_name").unwrap();
        let max_vmo_name = zx::Name::new("a_great_maximum_length_vmo_name").unwrap();

        assert_eq!(vmo_name_with_prefix(&empty_vmo_name, VMO_NAME_PREFIX_BSS), "bss:<unknown ELF>");
        assert_eq!(
            vmo_name_with_prefix(&short_vmo_name, VMO_NAME_PREFIX_BSS),
            "bss:short_vmo_name",
        );
        assert_eq!(
            vmo_name_with_prefix(&max_vmo_name, VMO_NAME_PREFIX_BSS),
            "bss:a_great_maximum_length_vmo_",
        );
        assert_eq!(
            vmo_name_with_prefix(&max_vmo_name, VMO_NAME_PREFIX_DATA),
            "data:a_great_maximum_length_vmo",
        );

        assert_eq!(
            vmo_name_with_prefix(&empty_vmo_name, "a_long_vmo_name_prefix:"),
            "a_long_vmo_name_prefix:<unknown",
        );
        assert_eq!(
            vmo_name_with_prefix(&empty_vmo_name, "a_great_maximum_length_vmo_name"),
            max_vmo_name,
        );
        assert_eq!(
            vmo_name_with_prefix(&max_vmo_name, "anystringhere"),
            "anystringherea_great_maximum_le"
        );
    }
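
    // A minimal check of the flag-translation helpers above: EXECUTE implies CAN_MAP_READ for
    // allocation, and PERM_READ_IF_XOM_UNSUPPORTED for mapping, since execute-only may fall
    // back to read-execute where execute-only memory is unsupported.
    #[test]
    fn elf_to_vmar_flag_translation() {
        let rx = elf_parse::SegmentFlags::from_bits_truncate(
            elf_parse::SegmentFlags::READ.bits() | elf_parse::SegmentFlags::EXECUTE.bits(),
        );
        assert_eq!(
            elf_to_vmar_can_map_flags(&rx),
            zx::VmarFlags::CAN_MAP_READ | zx::VmarFlags::CAN_MAP_EXECUTE
        );
        assert_eq!(
            elf_to_vmar_perm_flags(&rx),
            zx::VmarFlags::PERM_READ
                | zx::VmarFlags::PERM_EXECUTE
                | zx::VmarFlags::PERM_READ_IF_XOM_UNSUPPORTED
        );

        let w = elf_parse::SegmentFlags::WRITE;
        assert_eq!(elf_to_vmar_can_map_flags(&w), zx::VmarFlags::CAN_MAP_WRITE);
        assert_eq!(elf_to_vmar_perm_flags(&w), zx::VmarFlags::PERM_WRITE);
    }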

    /// A mapping request recorded by TrackingMapper.
    #[derive(Debug)]
    struct RecordedMapping {
        vmo: zx::Vmo,
        vmo_offset: u64,
        length: usize,
        flags: zx::VmarFlags,
    }

    /// A Mapper that records the mappings it is asked to perform, instead of actually mapping
    /// anything, so tests can inspect them.
    struct TrackingMapper(RefCell<Vec<RecordedMapping>>);

    impl TrackingMapper {
        fn new() -> Self {
            Self(RefCell::new(Vec::new()))
        }
    }

    impl IntoIterator for TrackingMapper {
        type Item = RecordedMapping;
        type IntoIter = std::vec::IntoIter<Self::Item>;

        fn into_iter(self) -> Self::IntoIter {
            self.0.into_inner().into_iter()
        }
    }

    impl Mapper for TrackingMapper {
        fn map(
            &self,
            vmar_offset: usize,
            vmo: &zx::Vmo,
            vmo_offset: u64,
            length: usize,
            flags: zx::VmarFlags,
        ) -> Result<usize, zx::Status> {
            self.0.borrow_mut().push(RecordedMapping {
                vmo: vmo.as_handle_ref().duplicate(zx::Rights::SAME_RIGHTS).unwrap().into(),
                vmo_offset,
                length,
                flags,
            });
            Ok(vmar_offset)
        }
    }

    /// A minimal valid ELF file header for a shared object with a single program header.
    const ELF_FILE_HEADER: &elf_parse::Elf64FileHeader = &elf_parse::Elf64FileHeader {
        ident: elf_parse::ElfIdent {
            magic: elf_parse::ELF_MAGIC,
            class: elf_parse::ElfClass::Elf64 as u8,
            data: elf_parse::NATIVE_ENCODING as u8,
            version: elf_parse::ElfVersion::Current as u8,
            osabi: 0x00,
            abiversion: 0x00,
            pad: [0; 7],
        },
        elf_type: elf_parse::ElfType::SharedObject as u16,
        machine: elf_parse::CURRENT_ARCH as u16,
        version: elf_parse::ElfVersion::Current as u32,
        entry: 0x10000,
        phoff: size_of::<elf_parse::Elf64FileHeader>(),
        shoff: 0,
        flags: 0,
        ehsize: size_of::<elf_parse::Elf64FileHeader>() as u16,
        phentsize: size_of::<elf_parse::Elf64ProgramHeader>() as u16,
        phnum: 1,
        shentsize: 0,
        shnum: 0,
        shstrndx: 0,
    };

    /// Rights held on the test ELF VMO handles; tests subtract WRITE to simulate an ELF VMO
    /// from a read-only source.
    const VMO_DEFAULT_RIGHTS: zx::Rights = zx::Rights::from_bits_truncate(
        zx::Rights::DUPLICATE.bits()
            | zx::Rights::TRANSFER.bits()
            | zx::Rights::READ.bits()
            | zx::Rights::WRITE.bits()
            | zx::Rights::MAP.bits()
            | zx::Rights::GET_PROPERTY.bits()
            | zx::Rights::SET_PROPERTY.bits(),
    );

    #[test]
    fn map_read_only_with_page_unaligned_bss() {
        const ELF_DATA: &[u8; 8] = b"FUCHSIA!";

        lazy_static! {
            static ref PAGE_SIZE: usize = zx::system_get_page_size() as usize;
            static ref ELF_PROGRAM_HEADER: elf_parse::Elf64ProgramHeader =
                elf_parse::Elf64ProgramHeader {
                    segment_type: elf_parse::SegmentType::Load as u32,
                    flags: elf_parse::SegmentFlags::from_bits_truncate(
                        elf_parse::SegmentFlags::READ.bits()
                            | elf_parse::SegmentFlags::EXECUTE.bits(),
                    )
                    .bits(),
                    offset: *PAGE_SIZE,
                    vaddr: 0x10000,
                    paddr: 0x10000,
                    filesz: ELF_DATA.len() as u64,
                    memsz: 0x100,
                    align: *PAGE_SIZE as u64,
                };
        }
        let headers = elf_parse::Elf64Headers::new_for_test(
            ELF_FILE_HEADER,
            Some(std::slice::from_ref(&ELF_PROGRAM_HEADER)),
        );
        let vmo = zx::Vmo::create(*PAGE_SIZE as u64 * 2).expect("create VMO");

        // Fill the VMO with 0xff so the zeroed BSS tail is distinguishable from fresh pages.
        let data = vec![0xff; *PAGE_SIZE * 2];
        vmo.write(&data, 0).expect("fill VMO with 0xff");
        vmo.write(ELF_DATA, *PAGE_SIZE as u64).expect("write data to VMO");

        // Remove the WRITE right, as an ELF VMO from a read-only source would lack it.
        let vmo =
            vmo.replace_handle(VMO_DEFAULT_RIGHTS - zx::Rights::WRITE).expect("remove WRITE right");

        let mapper = TrackingMapper::new();
        map_elf_segments(&vmo, &headers, &mapper, 0, 0).expect("map ELF segments");

        let mut mapping_iter = mapper.into_iter();

        // The segment has a page-unaligned BSS, so it is mapped from a zeroed COW clone.
        let mapping = mapping_iter.next().expect("mapping from ELF VMO");

        let mut data = vec![0; *PAGE_SIZE];
        mapping.vmo.read(&mut data, mapping.vmo_offset).expect("read VMO");

        // The mapped page should contain the file data followed by zeros, not 0xff.
        let expected = ELF_DATA
            .iter()
            .copied()
            .chain(std::iter::repeat(0).take(*PAGE_SIZE - ELF_DATA.len()))
            .collect::<Vec<u8>>();

        assert_eq!(&expected, &data);

        // No other mappings should have been made.
        assert_matches!(mapping_iter.next(), None);
    }

    #[test]
    fn map_read_only_vmo_with_page_aligned_bss() {
        lazy_static! {
            static ref PAGE_SIZE: usize = zx::system_get_page_size() as usize;
            static ref ELF_PROGRAM_HEADER: elf_parse::Elf64ProgramHeader =
                elf_parse::Elf64ProgramHeader {
                    segment_type: elf_parse::SegmentType::Load as u32,
                    flags: elf_parse::SegmentFlags::from_bits_truncate(
                        elf_parse::SegmentFlags::READ.bits()
                            | elf_parse::SegmentFlags::EXECUTE.bits(),
                    )
                    .bits(),
                    offset: *PAGE_SIZE,
                    vaddr: 0x10000,
                    paddr: 0x10000,
                    filesz: *PAGE_SIZE as u64,
                    memsz: *PAGE_SIZE as u64 * 2,
                    align: *PAGE_SIZE as u64,
                };
        }
        let headers = elf_parse::Elf64Headers::new_for_test(
            ELF_FILE_HEADER,
            Some(std::slice::from_ref(&ELF_PROGRAM_HEADER)),
        );
        let vmo = zx::Vmo::create(*PAGE_SIZE as u64 * 2).expect("create VMO");
        let pattern = vec![0xff; *PAGE_SIZE * 2];
        vmo.write(&pattern, 0).expect("fill VMO with 0xff");

        // Remove the WRITE right; the page-aligned BSS means no zeroing write is needed.
        let vmo =
            vmo.replace_handle(VMO_DEFAULT_RIGHTS - zx::Rights::WRITE).expect("remove WRITE right");

        let mapper = TrackingMapper::new();
        map_elf_segments(&vmo, &headers, &mapper, 0, 0).expect("map ELF segments");

        let mut mapping_iter = mapper.into_iter();

        // The file data is mapped directly from the original read-only VMO.
        let mapping = mapping_iter.next().expect("mapping from ELF VMO");
        assert_eq!(mapping.vmo.get_koid().unwrap(), vmo.get_koid().unwrap());

        let mut data = vec![0; *PAGE_SIZE];

        mapping.vmo.read(&mut data, mapping.vmo_offset).expect("read ELF VMO");
        assert_eq!(&data, &pattern[0..*PAGE_SIZE]);

        // The BSS is mapped from a separate, zero-filled VMO.
        let mapping = mapping_iter.next().expect("mapping from BSS VMO");

        mapping.vmo.read(&mut data, mapping.vmo_offset).expect("read BSS VMO");
        let zero = vec![0; *PAGE_SIZE];
        assert_eq!(&data, &zero);

        assert_matches!(mapping_iter.next(), None);
    }

    #[test]
    fn map_read_only_vmo_with_no_bss() {
        lazy_static! {
            static ref PAGE_SIZE: usize = zx::system_get_page_size() as usize;
            static ref ELF_PROGRAM_HEADER: elf_parse::Elf64ProgramHeader =
                elf_parse::Elf64ProgramHeader {
                    segment_type: elf_parse::SegmentType::Load as u32,
                    flags: elf_parse::SegmentFlags::from_bits_truncate(
                        elf_parse::SegmentFlags::READ.bits()
                            | elf_parse::SegmentFlags::EXECUTE.bits(),
                    )
                    .bits(),
                    offset: *PAGE_SIZE,
                    vaddr: 0x10000,
                    paddr: 0x10000,
                    filesz: *PAGE_SIZE as u64,
                    memsz: *PAGE_SIZE as u64,
                    align: *PAGE_SIZE as u64,
                };
        }
        let headers = elf_parse::Elf64Headers::new_for_test(
            ELF_FILE_HEADER,
            Some(std::slice::from_ref(&ELF_PROGRAM_HEADER)),
        );
        let vmo = zx::Vmo::create(*PAGE_SIZE as u64 * 2).expect("create VMO");
        let pattern = vec![0xff; *PAGE_SIZE * 2];
        vmo.write(&pattern, 0).expect("fill VMO with 0xff");

        // With no BSS and no WRITE flag, the original VMO can be mapped as-is.
        let vmo =
            vmo.replace_handle(VMO_DEFAULT_RIGHTS - zx::Rights::WRITE).expect("remove WRITE right");

        let mapper = TrackingMapper::new();
        map_elf_segments(&vmo, &headers, &mapper, 0, 0).expect("map ELF segments");

        let mut mapping_iter = mapper.into_iter();

        let mapping = mapping_iter.next().expect("mapping from ELF VMO");
        assert_eq!(mapping.vmo.get_koid().unwrap(), vmo.get_koid().unwrap());

        let mut data = vec![0; *PAGE_SIZE];

        mapping.vmo.read(&mut data, mapping.vmo_offset).expect("read ELF VMO");
        assert_eq!(&data, &pattern[0..*PAGE_SIZE]);

        assert_matches!(mapping_iter.next(), None);
    }

    #[test]
    fn map_read_only_vmo_with_write_flag() {
        lazy_static! {
            static ref PAGE_SIZE: usize = zx::system_get_page_size() as usize;
            static ref ELF_PROGRAM_HEADER: elf_parse::Elf64ProgramHeader =
                elf_parse::Elf64ProgramHeader {
                    segment_type: elf_parse::SegmentType::Load as u32,
                    flags: elf_parse::SegmentFlags::from_bits_truncate(
                        elf_parse::SegmentFlags::READ.bits()
                            | elf_parse::SegmentFlags::WRITE.bits(),
                    )
                    .bits(),
                    offset: *PAGE_SIZE,
                    vaddr: 0x10000,
                    paddr: 0x10000,
                    filesz: *PAGE_SIZE as u64,
                    memsz: *PAGE_SIZE as u64,
                    align: *PAGE_SIZE as u64,
                };
        }
        let headers = elf_parse::Elf64Headers::new_for_test(
            ELF_FILE_HEADER,
            Some(std::slice::from_ref(&ELF_PROGRAM_HEADER)),
        );
        let vmo = zx::Vmo::create(*PAGE_SIZE as u64 * 2).expect("create VMO");

        // Remove the WRITE right to check that writable segments don't require a writable VMO.
        let vmo =
            vmo.replace_handle(VMO_DEFAULT_RIGHTS - zx::Rights::WRITE).expect("remove WRITE right");

        let mapper = TrackingMapper::new();
        map_elf_segments(&vmo, &headers, &mapper, 0, 0).expect("map ELF segments");

        let mut mapping_iter = mapper.into_iter();

        // The writable segment is mapped from a COW child, not the original VMO.
        let mapping = mapping_iter.next().expect("mapping from ELF VMO");
        assert_ne!(mapping.vmo.get_koid().unwrap(), vmo.get_koid().unwrap());

        // The COW child is writable even though the original VMO is not.
        mapping.vmo.write(b"FUCHSIA!", mapping.vmo_offset).expect("write to COW VMO");

        assert_matches!(mapping_iter.next(), None);
    }

    #[test]
    fn segment_with_zero_file_size() {
        lazy_static! {
            static ref PAGE_SIZE: usize = zx::system_get_page_size() as usize;
            static ref ELF_PROGRAM_HEADER: elf_parse::Elf64ProgramHeader =
                elf_parse::Elf64ProgramHeader {
                    segment_type: elf_parse::SegmentType::Load as u32,
                    flags: elf_parse::SegmentFlags::from_bits_truncate(
                        elf_parse::SegmentFlags::READ.bits()
                            | elf_parse::SegmentFlags::WRITE.bits(),
                    )
                    .bits(),
                    offset: *PAGE_SIZE,
                    vaddr: 0x10000,
                    paddr: 0x10000,
                    filesz: 0,
                    memsz: 1,
                    align: *PAGE_SIZE as u64,
                };
        }
        let headers = elf_parse::Elf64Headers::new_for_test(
            ELF_FILE_HEADER,
            Some(std::slice::from_ref(&ELF_PROGRAM_HEADER)),
        );
        let vmo = zx::Vmo::create(*PAGE_SIZE as u64 * 2).expect("create VMO");

        let mapper = TrackingMapper::new();
        map_elf_segments(&vmo, &headers, &mapper, 0, 0).expect("map ELF segments");
        // A segment with no file data must not produce a zero-length mapping.
        for mapping in mapper.into_iter() {
            assert!(mapping.length != 0);
        }
    }
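
    // Like map_read_only_vmo_with_page_aligned_bss above, but for a writable segment: expect
    // two mappings, a COW child of the ELF VMO for the file data and a fresh anonymous VMO
    // for the BSS, named with the prefixes defined in this module.
    #[test]
    fn map_writable_segment_with_page_aligned_bss() {
        lazy_static! {
            static ref PAGE_SIZE: usize = zx::system_get_page_size() as usize;
            static ref ELF_PROGRAM_HEADER: elf_parse::Elf64ProgramHeader =
                elf_parse::Elf64ProgramHeader {
                    segment_type: elf_parse::SegmentType::Load as u32,
                    flags: elf_parse::SegmentFlags::from_bits_truncate(
                        elf_parse::SegmentFlags::READ.bits()
                            | elf_parse::SegmentFlags::WRITE.bits(),
                    )
                    .bits(),
                    offset: *PAGE_SIZE,
                    vaddr: 0x10000,
                    paddr: 0x10000,
                    filesz: *PAGE_SIZE as u64,
                    memsz: *PAGE_SIZE as u64 * 2,
                    align: *PAGE_SIZE as u64,
                };
        }
        let headers = elf_parse::Elf64Headers::new_for_test(
            ELF_FILE_HEADER,
            Some(std::slice::from_ref(&ELF_PROGRAM_HEADER)),
        );
        let vmo = zx::Vmo::create(*PAGE_SIZE as u64 * 2).expect("create VMO");

        let mapper = TrackingMapper::new();
        map_elf_segments(&vmo, &headers, &mapper, 0, 0).expect("map ELF segments");

        let mut mapping_iter = mapper.into_iter();

        // File data comes from a COW child of the original VMO, named with the data prefix.
        let mapping = mapping_iter.next().expect("mapping from data VMO");
        assert_ne!(mapping.vmo.get_koid().unwrap(), vmo.get_koid().unwrap());
        assert_eq!(mapping.vmo.get_name().unwrap(), "data:<unknown ELF>");
        assert!(mapping.flags.contains(zx::VmarFlags::PERM_WRITE));

        // The page-aligned BSS comes from a fresh anonymous VMO, named with the BSS prefix.
        let mapping = mapping_iter.next().expect("mapping from BSS VMO");
        assert_eq!(mapping.vmo.get_name().unwrap(), "bss:<unknown ELF>");
        assert_eq!(mapping.length, *PAGE_SIZE);

        assert_matches!(mapping_iter.next(), None);
    }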

    #[test]
    fn map_execute_only_segment() {
        lazy_static! {
            static ref PAGE_SIZE: usize = zx::system_get_page_size() as usize;
            static ref ELF_PROGRAM_HEADER: elf_parse::Elf64ProgramHeader =
                elf_parse::Elf64ProgramHeader {
                    segment_type: elf_parse::SegmentType::Load as u32,
                    flags: elf_parse::SegmentFlags::from_bits_truncate(
                        elf_parse::SegmentFlags::EXECUTE.bits(),
                    )
                    .bits(),
                    offset: *PAGE_SIZE,
                    vaddr: 0x10000,
                    paddr: 0x10000,
                    filesz: 0x10,
                    memsz: 0x10,
                    align: *PAGE_SIZE as u64,
                };
        }
        let headers = elf_parse::Elf64Headers::new_for_test(
            ELF_FILE_HEADER,
            Some(std::slice::from_ref(&ELF_PROGRAM_HEADER)),
        );
        let vmo = zx::Vmo::create(*PAGE_SIZE as u64 * 2).expect("create VMO");

        let mapper = TrackingMapper::new();
        map_elf_segments(&vmo, &headers, &mapper, 0, 0).expect("map ELF segments");

        let mut mapping_iter = mapper.into_iter();
        let mapping = mapping_iter.next().expect("mapping from ELF VMO");
        // Execute-only segments map with PERM_READ_IF_XOM_UNSUPPORTED rather than PERM_READ.
        assert_eq!(
            mapping.flags,
            zx::VmarFlags::SPECIFIC
                | zx::VmarFlags::ALLOW_FAULTS
                | zx::VmarFlags::PERM_EXECUTE
                | zx::VmarFlags::PERM_READ_IF_XOM_UNSUPPORTED
        );

        assert_matches!(mapping_iter.next(), None);
    }
}