use crate::{elf_parse as elf, util};
use thiserror::Error;

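/// Possible errors that can occur during ELF loading.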
#[derive(Error, Debug)]
pub enum ElfLoadError {
    #[error("ELF load segments were empty")]
    NothingToLoad,
    #[error("Failed to allocate VMAR for ELF: {}", _0)]
    VmarAllocate(zx::Status),
    #[error("Failed to map VMAR: {}", _0)]
    VmarMap(zx::Status),
    #[error("Failed to create CoW VMO clone: {}", _0)]
    VmoCowClone(zx::Status),
    #[error("Failed to create VMO: {}", _0)]
    VmoCreate(zx::Status),
    #[error("Failed to read from VMO: {}", _0)]
    VmoRead(zx::Status),
    #[error("Failed to write to VMO: {}", _0)]
    VmoWrite(zx::Status),
    #[error("Failed to get VMO name: {}", _0)]
    GetVmoName(zx::Status),
    #[error("Failed to set VMO name: {}", _0)]
    SetVmoName(zx::Status),
}

impl ElfLoadError {
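    /// Returns the most appropriate zx::Status code for the given error.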
    pub fn as_zx_status(&self) -> zx::Status {
        match self {
            ElfLoadError::NothingToLoad => zx::Status::NOT_FOUND,
            ElfLoadError::VmarAllocate(s)
            | ElfLoadError::VmarMap(s)
            | ElfLoadError::VmoCowClone(s)
            | ElfLoadError::VmoCreate(s)
            | ElfLoadError::VmoRead(s)
            | ElfLoadError::VmoWrite(s)
            | ElfLoadError::GetVmoName(s)
            | ElfLoadError::SetVmoName(s) => *s,
        }
    }
}

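/// The address space requirements of an ELF's load segments, as computed by
/// [`loaded_elf_info`].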
#[derive(Debug)]
pub struct LoadedElfInfo {
    /// The lowest address used by the load segments, rounded down to a page boundary.
    pub low: usize,

    /// The highest address used by the load segments, rounded up to a page boundary.
    pub high: usize,

    /// The union of the permission flags of all load segments.
    pub max_perm: elf::SegmentFlags,
}

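/// Returns the address space requirements for loading the ELF described by
/// `headers`: the page-aligned bounds of its load segments and the union of
/// their permissions.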
pub fn loaded_elf_info(headers: &elf::Elf64Headers) -> LoadedElfInfo {
    let (mut first, mut low, mut high) = (true, 0, 0);
    let mut max_perm = elf::SegmentFlags::empty();
    for hdr in headers.program_headers_with_type(elf::SegmentType::Load) {
        // Assumes load segments are in ascending vaddr order (as the ELF spec
        // requires), so the first segment determines `low`.
        if first {
            low = util::page_start(hdr.vaddr);
            first = false;
        }
        high = util::page_end(hdr.vaddr + hdr.memsz as usize);
        max_perm |= hdr.flags();
    }
    LoadedElfInfo { low, high, max_perm }
}

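/// The result of successfully loading an ELF, as returned by [`load_elf`].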
#[derive(Debug)]
pub struct LoadedElf {
    /// The VMAR that the ELF file was loaded into.
    pub vmar: zx::Vmar,

    /// The virtual address of the VMAR's base.
    pub vmar_base: usize,

    /// The ELF entry point, adjusted for the load bias.
    pub entry: usize,
}

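/// Abstraction over the map operation, so that callers of [`map_elf_segments`]
/// can map into a real VMAR or merely observe the requested mappings (e.g. in
/// tests).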
pub trait Mapper {
    /// Maps `length` bytes of `vmo` starting at `vmo_offset`, at `vmar_offset`
    /// relative to this mapper's base, returning the mapped address.
    fn map(
        &self,
        vmar_offset: usize,
        vmo: &zx::Vmo,
        vmo_offset: u64,
        length: usize,
        flags: zx::VmarFlags,
    ) -> Result<usize, zx::Status>;
}

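// A VMAR is the canonical Mapper; this forwards to the inherent zx::Vmar::map.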
impl Mapper for zx::Vmar {
    fn map(
        &self,
        vmar_offset: usize,
        vmo: &zx::Vmo,
        vmo_offset: u64,
        length: usize,
        flags: zx::VmarFlags,
    ) -> Result<usize, zx::Status> {
        Self::map(self, vmar_offset, vmo, vmo_offset, length, flags)
    }
}

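/// Loads the ELF in `vmo` into a new child VMAR of `root_vmar`, sized to hold
/// all of its load segments, and returns the VMAR, its base address, and the
/// bias-adjusted entry point.
///
/// # Example
///
/// A minimal sketch, not compiled as a doc test; it assumes the accompanying
/// `elf_parse` module's `from_vmo` constructor and the `fuchsia_runtime`
/// crate's `vmar_root_self()`:
///
/// ```ignore
/// // Sketch: `vmo` is a zx::Vmo containing the ELF image.
/// let headers = elf::Elf64Headers::from_vmo(&vmo)?;
/// let loaded = load_elf(&vmo, &headers, &fuchsia_runtime::vmar_root_self())?;
/// // `loaded.entry` is the load-biased entry point; `loaded.vmar` owns the mappings.
/// ```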
pub fn load_elf(
    vmo: &zx::Vmo,
    headers: &elf::Elf64Headers,
    root_vmar: &zx::Vmar,
) -> Result<LoadedElf, ElfLoadError> {
    let info = loaded_elf_info(headers);
    let size = info.high - info.low;
    if size == 0 {
        return Err(ElfLoadError::NothingToLoad);
    }

    // Allocate a VMAR large enough for all load segments, with the union of
    // their permissions. With no SPECIFIC flag, the kernel picks the base.
    let flags = zx::VmarFlags::CAN_MAP_SPECIFIC | elf_to_vmar_can_map_flags(&info.max_perm);
    let (vmar, vmar_base) =
        root_vmar.allocate(0, size, flags).map_err(ElfLoadError::VmarAllocate)?;

    // The bias between p_vaddr values in the ELF headers and the addresses
    // actually mapped. wrapping_sub, since the VMAR base may be below info.low.
    let vaddr_bias = vmar_base.wrapping_sub(info.low);

    map_elf_segments(vmo, headers, &vmar, vmar_base, vaddr_bias)?;
    Ok(LoadedElf { vmar, vmar_base, entry: headers.file_header().entry.wrapping_add(vaddr_bias) })
}

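/// Maps the load segments of the ELF in `vmo` through `mapper`. `mapper_base`
/// is the base address of the region `mapper` maps into, and `vaddr_bias` is
/// the offset added to each segment's virtual address to obtain its final
/// mapped address.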
pub fn map_elf_segments(
    vmo: &zx::Vmo,
    headers: &elf::Elf64Headers,
    mapper: &dyn Mapper,
    mapper_base: usize,
    vaddr_bias: usize,
) -> Result<(), ElfLoadError> {
    // Mappings are requested at offsets relative to the mapper's base address.
    let mapper_relative_bias = vaddr_bias.wrapping_sub(mapper_base);
    let vmo_name = vmo.get_name().map_err(ElfLoadError::GetVmoName)?;
    for hdr in headers.program_headers_with_type(elf::SegmentType::Load) {
        // Round the segment's file offset and virtual address down to a page
        // boundary, growing the file and memory sizes to compensate.
        let adjust = util::page_offset(hdr.offset);
        let mut file_offset = hdr.offset - adjust;
        let file_size = hdr.filesz + adjust as u64;
        let virt_offset = hdr.vaddr - adjust;
        let virt_size = hdr.memsz + adjust as u64;

        let virt_addr = virt_offset.wrapping_add(mapper_relative_bias);

        // If the BSS begins partway through a file-backed page, that page must
        // be writable so its tail can be zeroed.
        let must_write = virt_size > file_size && util::page_offset(file_size as usize) != 0;

        // For segments that are writable (or need the zeroing above), map a
        // copy-on-write child of the VMO rather than the VMO itself.
        let vmo_to_map: &zx::Vmo;
        let writeable_vmo: zx::Vmo;
        if must_write || (file_size > 0 && hdr.flags().contains(elf::SegmentFlags::WRITE)) {
            writeable_vmo = vmo
                .create_child(
                    zx::VmoChildOptions::SNAPSHOT_AT_LEAST_ON_WRITE,
                    file_offset as u64,
                    util::page_end(file_size as usize) as u64,
                )
                .map_err(ElfLoadError::VmoCowClone)?;
            writeable_vmo
                .set_name(&vmo_name_with_prefix(&vmo_name, VMO_NAME_PREFIX_DATA))
                .map_err(ElfLoadError::SetVmoName)?;
            // The child was created at the parent's (page-aligned) file_offset,
            // so mapping now starts at offset 0.
            file_offset = 0;

            // Zero the tail of the last file-backed page, which belongs to BSS.
            if virt_size > file_size {
                let memset_size = util::page_end(file_size as usize) - file_size as usize;
                if memset_size > 0 {
                    writeable_vmo
                        .write(&vec![0u8; memset_size], file_size)
                        .map_err(ElfLoadError::VmoWrite)?;
                }
            }
            vmo_to_map = &writeable_vmo;
        } else {
            vmo_to_map = vmo;
        }

        let flags = zx::VmarFlags::SPECIFIC
            | zx::VmarFlags::ALLOW_FAULTS
            | elf_to_vmar_perm_flags(&hdr.flags());
        // Map the file-backed portion of the segment, if any.
        if file_size != 0 {
            mapper
                .map(
                    virt_addr,
                    vmo_to_map,
                    file_offset as u64,
                    util::page_end(file_size as usize),
                    flags,
                )
                .map_err(ElfLoadError::VmarMap)?;
        }

        // Map any whole BSS pages beyond the file-backed portion from a fresh
        // anonymous VMO, which is zero-filled by construction.
        if virt_size > file_size {
            let bss_vmo_start = util::page_end(file_size as usize);
            let bss_vmo_size = util::page_end(virt_size as usize) - bss_vmo_start;
            if bss_vmo_size > 0 {
                let anon_vmo =
                    zx::Vmo::create(bss_vmo_size as u64).map_err(ElfLoadError::VmoCreate)?;
                anon_vmo
                    .set_name(&vmo_name_with_prefix(&vmo_name, VMO_NAME_PREFIX_BSS))
                    .map_err(ElfLoadError::SetVmoName)?;
                mapper
                    .map(virt_addr + bss_vmo_start, &anon_vmo, 0, bss_vmo_size, flags)
                    .map_err(ElfLoadError::VmarMap)?;
            }
        }
    }
    Ok(())
}

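// Prefixes for the names given to VMOs created for mapped segments: CoW clones
// of file data, and anonymous pages backing BSS.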
const VMO_NAME_PREFIX_BSS: &str = "bss:";
const VMO_NAME_PREFIX_DATA: &str = "data:";

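/// Builds a name for a segment VMO from `prefix` and the ELF VMO's name,
/// substituting a placeholder if the ELF VMO is unnamed and truncating the
/// result to fit in a zx::Name.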
fn vmo_name_with_prefix(name: &zx::Name, prefix: &str) -> zx::Name {
    // The prefix alone must fit; the combined name is truncated by new_lossy.
    assert!(prefix.len() <= zx::sys::ZX_MAX_NAME_LEN - 1);
    if name.is_empty() {
        zx::Name::new_lossy(&format!("{prefix}<unknown ELF>"))
    } else {
        zx::Name::new_lossy(&format!("{prefix}{name}"))
    }
}

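/// Converts ELF segment permission flags to the CAN_MAP_* VmarFlags needed for
/// a VMAR in which segments with those permissions can be mapped. EXECUTE also
/// requests CAN_MAP_READ so that execute segments can fall back to readable
/// mappings where execute-only memory is unsupported.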
fn elf_to_vmar_can_map_flags(elf_flags: &elf::SegmentFlags) -> zx::VmarFlags {
    let mut flags = zx::VmarFlags::empty();
    if elf_flags.contains(elf::SegmentFlags::READ) {
        flags |= zx::VmarFlags::CAN_MAP_READ;
    }
    if elf_flags.contains(elf::SegmentFlags::WRITE) {
        flags |= zx::VmarFlags::CAN_MAP_WRITE;
    }
    if elf_flags.contains(elf::SegmentFlags::EXECUTE) {
        flags |= zx::VmarFlags::CAN_MAP_EXECUTE | zx::VmarFlags::CAN_MAP_READ;
    }
    flags
}

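/// Converts ELF segment permission flags to the PERM_* VmarFlags used when
/// mapping the segment.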
fn elf_to_vmar_perm_flags(elf_flags: &elf::SegmentFlags) -> zx::VmarFlags {
    let mut flags = zx::VmarFlags::empty();
    if elf_flags.contains(elf::SegmentFlags::READ) {
        flags |= zx::VmarFlags::PERM_READ;
    }
    if elf_flags.contains(elf::SegmentFlags::WRITE) {
        flags |= zx::VmarFlags::PERM_WRITE;
    }
    if elf_flags.contains(elf::SegmentFlags::EXECUTE) {
        flags |= zx::VmarFlags::PERM_EXECUTE | zx::VmarFlags::PERM_READ_IF_XOM_UNSUPPORTED;
    }
    flags
}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::elf_parse;
    use assert_matches::assert_matches;
    use fidl::HandleBased;
    use std::cell::RefCell;
    use std::mem::size_of;
    use std::sync::LazyLock;

    #[test]
    fn test_vmo_name_with_prefix() {
        let empty_vmo_name = zx::Name::default();
        let short_vmo_name = zx::Name::new("short_vmo_name").unwrap();
        let max_vmo_name = zx::Name::new("a_great_maximum_length_vmo_name").unwrap();

        assert_eq!(vmo_name_with_prefix(&empty_vmo_name, VMO_NAME_PREFIX_BSS), "bss:<unknown ELF>");
        assert_eq!(
            vmo_name_with_prefix(&short_vmo_name, VMO_NAME_PREFIX_BSS),
            "bss:short_vmo_name",
        );
        // Combined names longer than ZX_MAX_NAME_LEN - 1 are truncated.
        assert_eq!(
            vmo_name_with_prefix(&max_vmo_name, VMO_NAME_PREFIX_BSS),
            "bss:a_great_maximum_length_vmo_",
        );
        assert_eq!(
            vmo_name_with_prefix(&max_vmo_name, VMO_NAME_PREFIX_DATA),
            "data:a_great_maximum_length_vmo",
        );

        // Long prefixes are allowed, at the expense of the original name.
        assert_eq!(
            vmo_name_with_prefix(&empty_vmo_name, "a_long_vmo_name_prefix:"),
            "a_long_vmo_name_prefix:<unknown",
        );
        assert_eq!(
            vmo_name_with_prefix(&empty_vmo_name, "a_great_maximum_length_vmo_name"),
            max_vmo_name,
        );
        assert_eq!(
            vmo_name_with_prefix(&max_vmo_name, "anystringhere"),
            "anystringherea_great_maximum_le"
        );
    }

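    /// The arguments captured from a single `Mapper::map` call.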
    #[derive(Debug)]
    struct RecordedMapping {
        vmo: zx::Vmo,
        vmo_offset: u64,
        length: usize,
        flags: zx::VmarFlags,
    }

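    /// A test Mapper that records requested mappings instead of creating them.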
    struct TrackingMapper(RefCell<Vec<RecordedMapping>>);

    impl TrackingMapper {
        fn new() -> Self {
            Self(RefCell::new(Vec::new()))
        }
    }

    impl IntoIterator for TrackingMapper {
        type Item = RecordedMapping;
        type IntoIter = std::vec::IntoIter<Self::Item>;

        fn into_iter(self) -> Self::IntoIter {
            self.0.into_inner().into_iter()
        }
    }

    impl Mapper for TrackingMapper {
        fn map(
            &self,
            vmar_offset: usize,
            vmo: &zx::Vmo,
            vmo_offset: u64,
            length: usize,
            flags: zx::VmarFlags,
        ) -> Result<usize, zx::Status> {
            self.0.borrow_mut().push(RecordedMapping {
                vmo: vmo.duplicate(zx::Rights::SAME_RIGHTS).unwrap(),
                vmo_offset,
                length,
                flags,
            });
            Ok(vmar_offset)
        }
    }

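    /// A minimal 64-bit ELF file header describing a single program header,
    /// shared by the tests below.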
    const ELF_FILE_HEADER: &elf_parse::Elf64FileHeader = &elf_parse::Elf64FileHeader {
        ident: elf_parse::ElfIdent {
            magic: elf_parse::ELF_MAGIC,
            class: elf_parse::ElfClass::Elf64 as u8,
            data: elf_parse::NATIVE_ENCODING as u8,
            version: elf_parse::ElfVersion::Current as u8,
            osabi: 0x00,
            abiversion: 0x00,
            pad: [0; 7],
        },
        elf_type: elf_parse::ElfType::SharedObject as u16,
        machine: elf_parse::CURRENT_ARCH as u16,
        version: elf_parse::ElfVersion::Current as u32,
        entry: 0x10000,
        phoff: size_of::<elf_parse::Elf64FileHeader>(),
        shoff: 0,
        flags: 0,
        ehsize: size_of::<elf_parse::Elf64FileHeader>() as u16,
        phentsize: size_of::<elf_parse::Elf64ProgramHeader>() as u16,
        phnum: 1,
        shentsize: 0,
        shnum: 0,
        shstrndx: 0,
    };

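    /// The rights a freshly created VMO handle carries, so that tests can
    /// derive handles with individual rights (e.g. WRITE) removed.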
    const VMO_DEFAULT_RIGHTS: zx::Rights = zx::Rights::from_bits_truncate(
        zx::Rights::DUPLICATE.bits()
            | zx::Rights::TRANSFER.bits()
            | zx::Rights::READ.bits()
            | zx::Rights::WRITE.bits()
            | zx::Rights::MAP.bits()
            | zx::Rights::GET_PROPERTY.bits()
            | zx::Rights::SET_PROPERTY.bits(),
    );

    #[test]
    fn map_read_only_with_page_unaligned_bss() {
        const ELF_DATA: &[u8; 8] = b"FUCHSIA!";

        static PAGE_SIZE: LazyLock<usize> = LazyLock::new(|| zx::system_get_page_size() as usize);
        static ELF_PROGRAM_HEADER: LazyLock<elf_parse::Elf64ProgramHeader> =
            LazyLock::new(|| elf_parse::Elf64ProgramHeader {
                segment_type: elf_parse::SegmentType::Load as u32,
                flags: elf_parse::SegmentFlags::from_bits_truncate(
                    elf_parse::SegmentFlags::READ.bits() | elf_parse::SegmentFlags::EXECUTE.bits(),
                )
                .bits(),
                offset: *PAGE_SIZE,
                vaddr: 0x10000,
                paddr: 0x10000,
                filesz: ELF_DATA.len() as u64,
                memsz: 0x100,
                align: *PAGE_SIZE as u64,
            });
        let headers = elf_parse::Elf64Headers::new_for_test(
            ELF_FILE_HEADER,
            Some(std::slice::from_ref(&ELF_PROGRAM_HEADER)),
        );
        let vmo = zx::Vmo::create(*PAGE_SIZE as u64 * 2).expect("create VMO");
        // Fill the VMO with 0xff so that zeroed BSS bytes are distinguishable.
        let data = vec![0xff; *PAGE_SIZE * 2];
        vmo.write(&data, 0).expect("fill VMO with 0xff");
        vmo.write(ELF_DATA, *PAGE_SIZE as u64).expect("write data to VMO");

        // Simulate a read-only ELF VMO by dropping the WRITE right.
        let vmo =
            vmo.replace_handle(VMO_DEFAULT_RIGHTS - zx::Rights::WRITE).expect("remove WRITE right");

        let mapper = TrackingMapper::new();
        map_elf_segments(&vmo, &headers, &mapper, 0, 0).expect("map ELF segments");

        let mut mapping_iter = mapper.into_iter();

        let mapping = mapping_iter.next().expect("mapping from ELF VMO");

        let mut data = vec![0; *PAGE_SIZE];
        mapping.vmo.read(&mut data, mapping.vmo_offset).expect("read VMO");

        // The mapped page must contain the file data followed by zeroes, not
        // the 0xff fill: the partial BSS page has to have been zeroed.
        let expected = ELF_DATA
            .iter()
            .copied()
            .chain(std::iter::repeat(0).take(*PAGE_SIZE - ELF_DATA.len()))
            .collect::<Vec<u8>>();

        assert_eq!(&expected, &data);

        // No other segments should have been mapped.
        assert_matches!(mapping_iter.next(), None);
    }

    #[test]
    fn map_read_only_vmo_with_page_aligned_bss() {
        static PAGE_SIZE: LazyLock<usize> = LazyLock::new(|| zx::system_get_page_size() as usize);
        static ELF_PROGRAM_HEADER: LazyLock<elf_parse::Elf64ProgramHeader> =
            LazyLock::new(|| elf_parse::Elf64ProgramHeader {
                segment_type: elf_parse::SegmentType::Load as u32,
                flags: elf_parse::SegmentFlags::from_bits_truncate(
                    elf_parse::SegmentFlags::READ.bits() | elf_parse::SegmentFlags::EXECUTE.bits(),
                )
                .bits(),
                offset: *PAGE_SIZE,
                vaddr: 0x10000,
                paddr: 0x10000,
                filesz: *PAGE_SIZE as u64,
                memsz: *PAGE_SIZE as u64 * 2,
                align: *PAGE_SIZE as u64,
            });
        let headers = elf_parse::Elf64Headers::new_for_test(
            ELF_FILE_HEADER,
            Some(std::slice::from_ref(&ELF_PROGRAM_HEADER)),
        );
        let vmo = zx::Vmo::create(*PAGE_SIZE as u64 * 2).expect("create VMO");
        let pattern = vec![0xff; *PAGE_SIZE * 2];
        vmo.write(&pattern, 0).expect("fill VMO with 0xff");

        // Simulate a read-only ELF VMO by dropping the WRITE right.
        let vmo =
            vmo.replace_handle(VMO_DEFAULT_RIGHTS - zx::Rights::WRITE).expect("remove WRITE right");

        let mapper = TrackingMapper::new();
        map_elf_segments(&vmo, &headers, &mapper, 0, 0).expect("map ELF segments");

        let mut mapping_iter = mapper.into_iter();

        // Since the BSS is page-aligned, the original VMO is mapped directly.
        let mapping = mapping_iter.next().expect("mapping from ELF VMO");
        assert_eq!(mapping.vmo.koid().unwrap(), vmo.koid().unwrap());

        let mut data = vec![0; *PAGE_SIZE];

        mapping.vmo.read(&mut data, mapping.vmo_offset).expect("read ELF VMO");
        assert_eq!(&data, &pattern[0..*PAGE_SIZE]);

        // The BSS is backed by a separate anonymous, zero-filled VMO.
        let mapping = mapping_iter.next().expect("mapping from BSS VMO");

        mapping.vmo.read(&mut data, mapping.vmo_offset).expect("read BSS VMO");
        let zero = vec![0; *PAGE_SIZE];
        assert_eq!(&data, &zero);

        assert_matches!(mapping_iter.next(), None);
    }

    #[test]
    fn map_read_only_vmo_with_no_bss() {
        static PAGE_SIZE: LazyLock<usize> = LazyLock::new(|| zx::system_get_page_size() as usize);
        static ELF_PROGRAM_HEADER: LazyLock<elf_parse::Elf64ProgramHeader> =
            LazyLock::new(|| elf_parse::Elf64ProgramHeader {
                segment_type: elf_parse::SegmentType::Load as u32,
                flags: elf_parse::SegmentFlags::from_bits_truncate(
                    elf_parse::SegmentFlags::READ.bits() | elf_parse::SegmentFlags::EXECUTE.bits(),
                )
                .bits(),
                offset: *PAGE_SIZE,
                vaddr: 0x10000,
                paddr: 0x10000,
                filesz: *PAGE_SIZE as u64,
                memsz: *PAGE_SIZE as u64,
                align: *PAGE_SIZE as u64,
            });
        let headers = elf_parse::Elf64Headers::new_for_test(
            ELF_FILE_HEADER,
            Some(std::slice::from_ref(&ELF_PROGRAM_HEADER)),
        );
        let vmo = zx::Vmo::create(*PAGE_SIZE as u64 * 2).expect("create VMO");
        let pattern = vec![0xff; *PAGE_SIZE * 2];
        vmo.write(&pattern, 0).expect("fill VMO with 0xff");

        // Simulate a read-only ELF VMO by dropping the WRITE right.
        let vmo =
            vmo.replace_handle(VMO_DEFAULT_RIGHTS - zx::Rights::WRITE).expect("remove WRITE right");

        let mapper = TrackingMapper::new();
        map_elf_segments(&vmo, &headers, &mapper, 0, 0).expect("map ELF segments");

        let mut mapping_iter = mapper.into_iter();

        // With no BSS and no write flag, the original VMO is mapped directly.
        let mapping = mapping_iter.next().expect("mapping from ELF VMO");
        assert_eq!(mapping.vmo.koid().unwrap(), vmo.koid().unwrap());

        let mut data = vec![0; *PAGE_SIZE];

        mapping.vmo.read(&mut data, mapping.vmo_offset).expect("read ELF VMO");
        assert_eq!(&data, &pattern[0..*PAGE_SIZE]);

        assert_matches!(mapping_iter.next(), None);
    }

    #[test]
    fn map_read_only_vmo_with_write_flag() {
        static PAGE_SIZE: LazyLock<usize> = LazyLock::new(|| zx::system_get_page_size() as usize);
        static ELF_PROGRAM_HEADER: LazyLock<elf_parse::Elf64ProgramHeader> =
            LazyLock::new(|| elf_parse::Elf64ProgramHeader {
                segment_type: elf_parse::SegmentType::Load as u32,
                flags: elf_parse::SegmentFlags::from_bits_truncate(
                    elf_parse::SegmentFlags::READ.bits() | elf_parse::SegmentFlags::WRITE.bits(),
                )
                .bits(),
                offset: *PAGE_SIZE,
                vaddr: 0x10000,
                paddr: 0x10000,
                filesz: *PAGE_SIZE as u64,
                memsz: *PAGE_SIZE as u64,
                align: *PAGE_SIZE as u64,
            });
        let headers = elf_parse::Elf64Headers::new_for_test(
            ELF_FILE_HEADER,
            Some(std::slice::from_ref(&ELF_PROGRAM_HEADER)),
        );
        let vmo = zx::Vmo::create(*PAGE_SIZE as u64 * 2).expect("create VMO");

        // Simulate a read-only ELF VMO by dropping the WRITE right.
        let vmo =
            vmo.replace_handle(VMO_DEFAULT_RIGHTS - zx::Rights::WRITE).expect("remove WRITE right");

        let mapper = TrackingMapper::new();
        map_elf_segments(&vmo, &headers, &mapper, 0, 0).expect("map ELF segments");

        let mut mapping_iter = mapper.into_iter();

        // Writable segments are mapped from a copy-on-write child rather than
        // the original VMO, so the recorded VMO is writable with a new koid.
        let mapping = mapping_iter.next().expect("mapping from ELF VMO");
        assert_ne!(mapping.vmo.koid().unwrap(), vmo.koid().unwrap());

        mapping.vmo.write(b"FUCHSIA!", mapping.vmo_offset).expect("write to COW VMO");

        assert_matches!(mapping_iter.next(), None);
    }

    #[test]
    fn segment_with_zero_file_size() {
        static PAGE_SIZE: LazyLock<usize> = LazyLock::new(|| zx::system_get_page_size() as usize);
        static ELF_PROGRAM_HEADER: LazyLock<elf_parse::Elf64ProgramHeader> =
            LazyLock::new(|| elf_parse::Elf64ProgramHeader {
                segment_type: elf_parse::SegmentType::Load as u32,
                flags: elf_parse::SegmentFlags::from_bits_truncate(
                    elf_parse::SegmentFlags::READ.bits() | elf_parse::SegmentFlags::WRITE.bits(),
                )
                .bits(),
                offset: *PAGE_SIZE,
                vaddr: 0x10000,
                paddr: 0x10000,
                filesz: 0,
                memsz: 1,
                align: *PAGE_SIZE as u64,
            });
        let headers = elf_parse::Elf64Headers::new_for_test(
            ELF_FILE_HEADER,
            Some(std::slice::from_ref(&ELF_PROGRAM_HEADER)),
        );
        let vmo = zx::Vmo::create(*PAGE_SIZE as u64 * 2).expect("create VMO");

        let mapper = TrackingMapper::new();
        map_elf_segments(&vmo, &headers, &mapper, 0, 0).expect("map ELF segments");
        // A zero-sized file portion must not produce a zero-length mapping.
        for mapping in mapper.into_iter() {
            assert!(mapping.length != 0);
        }
    }

    #[test]
    fn map_execute_only_segment() {
        static PAGE_SIZE: LazyLock<usize> = LazyLock::new(|| zx::system_get_page_size() as usize);
        static ELF_PROGRAM_HEADER: LazyLock<elf_parse::Elf64ProgramHeader> =
            LazyLock::new(|| elf_parse::Elf64ProgramHeader {
                segment_type: elf_parse::SegmentType::Load as u32,
                flags: elf_parse::SegmentFlags::from_bits_truncate(
                    elf_parse::SegmentFlags::EXECUTE.bits(),
                )
                .bits(),
                offset: *PAGE_SIZE,
                vaddr: 0x10000,
                paddr: 0x10000,
                filesz: 0x10,
                memsz: 0x10,
                align: *PAGE_SIZE as u64,
            });
        let headers = elf_parse::Elf64Headers::new_for_test(
            ELF_FILE_HEADER,
            Some(std::slice::from_ref(&ELF_PROGRAM_HEADER)),
        );
        let vmo = zx::Vmo::create(*PAGE_SIZE as u64 * 2).expect("create VMO");

        let mapper = TrackingMapper::new();
        map_elf_segments(&vmo, &headers, &mapper, 0, 0).expect("map ELF segments");

        let mut mapping_iter = mapper.into_iter();
        let mapping = mapping_iter.next().expect("mapping from ELF VMO");
        // Execute-only segments fall back to readable where XOM is unsupported.
        assert_eq!(
            mapping.flags,
            zx::VmarFlags::SPECIFIC
                | zx::VmarFlags::ALLOW_FAULTS
                | zx::VmarFlags::PERM_EXECUTE
                | zx::VmarFlags::PERM_READ_IF_XOM_UNSUPPORTED
        );

        assert_matches!(mapping_iter.next(), None);
    }
}