// process_builder/elf_load.rs

// Copyright 2019 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

//! Utilities for loading ELF files into an existing address space.

use crate::util;
use thiserror::Error;

10/// Possible errors that can occur during ELF loading.
11#[derive(Error, Debug)]
12pub enum ElfLoadError {
13    #[error("ELF load segments were empty")]
14    NothingToLoad,
15    #[error("Failed to allocate VMAR for ELF: {}", _0)]
16    VmarAllocate(zx::Status),
17    #[error("Failed to map VMAR: {}", _0)]
18    VmarMap(zx::Status),
19    #[error("Failed to create CoW VMO clone: {}", _0)]
20    VmoCowClone(zx::Status),
21    #[error("Failed to create VMO: {}", _0)]
22    VmoCreate(zx::Status),
23    #[error("Failed to read from VMO: {}", _0)]
24    VmoRead(zx::Status),
25    #[error("Failed to write to VMO: {}", _0)]
26    VmoWrite(zx::Status),
27    #[error("Failed to get VMO name: {}", _0)]
28    GetVmoName(zx::Status),
29    #[error("Failed to set VMO name: {}", _0)]
30    SetVmoName(zx::Status),
31}
32
33impl ElfLoadError {
34    /// Returns an appropriate zx::Status code for the given error.
35    pub fn as_zx_status(&self) -> zx::Status {
36        match self {
37            ElfLoadError::NothingToLoad => zx::Status::NOT_FOUND,
38            ElfLoadError::VmarAllocate(s)
39            | ElfLoadError::VmarMap(s)
40            | ElfLoadError::VmoCowClone(s)
41            | ElfLoadError::VmoCreate(s)
42            | ElfLoadError::VmoRead(s)
43            | ElfLoadError::VmoWrite(s)
44            | ElfLoadError::GetVmoName(s)
45            | ElfLoadError::SetVmoName(s) => *s,
46        }
47    }
48}
49
/// Information on what an ELF requires of its address space.
#[derive(Debug)]
pub struct LoadedElfInfo {
    /// The lowest address of the loaded ELF, rounded down to a page boundary
    /// (see [`loaded_elf_info`]).
    pub low: usize,

    /// The highest address of the loaded ELF, rounded up to a page boundary
    /// (see [`loaded_elf_info`]).
    pub high: usize,

    /// Union of all address space permissions required to load the ELF.
    pub max_perm: elf_parse::SegmentFlags,
}

63/// Returns the address space requirements to load this ELF. Attempting to load it into a VMAR with
64/// permissions less than max_perm, or at a base such that the range [base+low, base+high] is not
65/// entirely valid, will fail.
66pub fn loaded_elf_info(headers: &elf_parse::Elf64Headers) -> LoadedElfInfo {
67    let (mut first, mut low, mut high) = (true, 0, 0);
68    let mut max_perm = elf_parse::SegmentFlags::empty();
69    for hdr in headers.program_headers_with_type(elf_parse::SegmentType::Load) {
70        // elf_parse already checked that segments are ordered by vaddr and do not overlap.
71        if first {
72            low = util::page_start(hdr.vaddr);
73            first = false;
74        }
75        high = util::page_end(hdr.vaddr + hdr.memsz as usize);
76        max_perm |= hdr.flags();
77    }
78    LoadedElfInfo { low, high, max_perm }
79}
80
/// Return value of load_elf.
#[derive(Debug)]
pub struct LoadedElf {
    /// The VMAR that the ELF file was loaded into.
    pub vmar: zx::Vmar,

    /// The virtual address of the VMAR.
    pub vmar_base: usize,

    /// The ELF entry point, adjusted for the base address of the VMAR.
    pub entry: usize,
}

/// A trait so that callers of map_elf_segments can hook the map operation.
pub trait Mapper {
    /// Map memory from the given VMO at the specified location.
    ///
    /// `vmar_offset` is relative to the mapper's base address; `vmo_offset` and `length` select
    /// the region of `vmo` to map, and `flags` carries the requested mapping flags. On success,
    /// returns the address (or offset) at which the mapping was placed.
    ///
    /// See zx::Vmar::map for more details.
    fn map(
        &self,
        vmar_offset: usize,
        vmo: &zx::Vmo,
        vmo_offset: u64,
        length: usize,
        flags: zx::VmarFlags,
    ) -> Result<usize, zx::Status>;
}

impl Mapper for zx::Vmar {
    fn map(
        &self,
        vmar_offset: usize,
        vmo: &zx::Vmo,
        vmo_offset: u64,
        length: usize,
        flags: zx::VmarFlags,
    ) -> Result<usize, zx::Status> {
        // Fully-qualified call so this resolves to the inherent zx::Vmar::map (inherent methods
        // take precedence in path resolution) rather than recursing into this trait method.
        Self::map(self, vmar_offset, vmo, vmo_offset, length, flags)
    }
}

122/// Load an ELF into a new sub-VMAR of the specified root.
123pub fn load_elf(
124    vmo: &zx::Vmo,
125    headers: &elf_parse::Elf64Headers,
126    root_vmar: &zx::Vmar,
127) -> Result<LoadedElf, ElfLoadError> {
128    let info = loaded_elf_info(headers);
129    let size = info.high - info.low;
130    if size == 0 {
131        return Err(ElfLoadError::NothingToLoad);
132    }
133
134    // Individual mappings with be restricted based on segment permissions, but we also limit the
135    // overall VMAR to the maximum permissions required across all load segments.
136    let flags = zx::VmarFlags::CAN_MAP_SPECIFIC | elf_to_vmar_can_map_flags(&info.max_perm);
137    let (vmar, vmar_base) =
138        root_vmar.allocate(0, size, flags).map_err(|s| ElfLoadError::VmarAllocate(s))?;
139
140    // Get the relative bias between p_vaddr addresses in the headers and the allocated VMAR,
141    // rather than for the root VMAR. Should be equal to the first segment's starting vaddr
142    // negated, so that the first mapping starts at 0 within the allocated VMAR.
143    let vaddr_bias = vmar_base.wrapping_sub(info.low);
144
145    map_elf_segments(vmo, headers, &vmar, vmar_base, vaddr_bias)?;
146    Ok(LoadedElf { vmar, vmar_base, entry: headers.file_header().entry.wrapping_add(vaddr_bias) })
147}
148
/// Map the segments of an ELF into an existing VMAR.
///
/// * `vmo` - the VMO containing the ELF file.
/// * `headers` - the parsed ELF headers for `vmo`.
/// * `mapper` - receives one `map` call per region to be mapped.
/// * `mapper_base` - base address of the mapper's address range.
/// * `vaddr_bias` - offset added to each segment's p_vaddr to produce a load address.
pub fn map_elf_segments(
    vmo: &zx::Vmo,
    headers: &elf_parse::Elf64Headers,
    mapper: &dyn Mapper,
    mapper_base: usize,
    vaddr_bias: usize,
) -> Result<(), ElfLoadError> {
    // We intentionally use wrapping subtraction here, in case the ELF file happens to use vaddr's
    // that are higher than the VMAR base chosen by the kernel. Wrapping addition will be used when
    // adding this bias to vaddr values.
    //
    // For arch32 entries, the caller must assure that the relative bias will
    // not underflow. Beyond that, no virtual offset supplied by an ELF32 header
    // is able to overflow 64-bit addition and if the mapping lands outside of
    // addressable User memory, the Mapper/MemoryManager will disallow the
    // mapping.
    let mapper_relative_bias = vaddr_bias.wrapping_sub(mapper_base);
    // The file VMO's name seeds the names given to any VMOs created below (data clones and BSS).
    let vmo_name = vmo.get_name().map_err(|s| ElfLoadError::GetVmoName(s))?;
    for hdr in headers.program_headers_with_type(elf_parse::SegmentType::Load) {
        // Shift the start of the mapping down to the nearest page; `adjust` is added back to the
        // file and memory sizes so the mapped range still covers the whole segment.
        let adjust = util::page_offset(hdr.offset);
        let mut file_offset = hdr.offset - adjust;
        let file_size = hdr.filesz + adjust as u64;
        let virt_offset = hdr.vaddr - adjust;
        let virt_size = hdr.memsz + adjust as u64;

        // Calculate the virtual address range that this mapping needs to cover. These addresses
        // are relative to the allocated VMAR, not the root VMAR.
        let virt_addr = virt_offset.wrapping_add(mapper_relative_bias);

        // If the segment is specified as larger than the data in the file, and the data in the file
        // does not end at a page boundary, we will need to zero out the remaining memory in the
        // page.
        let must_write = virt_size > file_size && util::page_offset(file_size as usize) != 0;

        // If this segment is writeable (and we're mapping in some VMO content, i.e. it's not
        // all zero initialized) or the segment has a BSS section that needs to be zeroed, create
        // a writeable clone of the VMO. Otherwise use the potentially read-only VMO passed in.
        let vmo_to_map: &zx::Vmo;
        let writeable_vmo: zx::Vmo;
        if must_write || (file_size > 0 && hdr.flags().contains(elf_parse::SegmentFlags::WRITE)) {
            writeable_vmo = vmo
                .create_child(
                    zx::VmoChildOptions::SNAPSHOT_AT_LEAST_ON_WRITE,
                    file_offset as u64,
                    util::page_end(file_size as usize) as u64,
                )
                .map_err(ElfLoadError::VmoCowClone)?;
            writeable_vmo
                .set_name(&vmo_name_with_prefix(&vmo_name, VMO_NAME_PREFIX_DATA))
                .map_err(ElfLoadError::SetVmoName)?;
            // Update addresses into the VMO that will be mapped.
            // The child was created starting at file_offset, so offsets are now zero-based.
            file_offset = 0;

            // Zero-out the memory between the end of the filesize and the end of the page.
            if virt_size > file_size {
                // If the space to be zero-filled overlaps with the VMO, we need to memset it.
                let memset_size = util::page_end(file_size as usize) - file_size as usize;
                if memset_size > 0 {
                    writeable_vmo
                        .write(&vec![0u8; memset_size], file_size)
                        .map_err(|s| ElfLoadError::VmoWrite(s))?;
                }
            }
            vmo_to_map = &writeable_vmo;
        } else {
            vmo_to_map = vmo;
        }

        // Create the VMO part of the mapping.
        // The VMO can be pager-backed, so include the ALLOW_FAULTS flag. ALLOW_FAULTS is a no-op
        // if not applicable to the VMO type.
        let flags = zx::VmarFlags::SPECIFIC
            | zx::VmarFlags::ALLOW_FAULTS
            | elf_to_vmar_perm_flags(&hdr.flags());
        // Skip the map call entirely for zero-filesz segments (nothing from the file to map).
        if file_size != 0 {
            mapper
                .map(
                    virt_addr,
                    vmo_to_map,
                    file_offset as u64,
                    util::page_end(file_size as usize),
                    flags,
                )
                .map_err(ElfLoadError::VmarMap)?;
        }

        // If the mapping is specified as larger than the data in the file (i.e. virt_size is
        // larger than file_size), the remainder of the space (from virt_addr + file_size to
        // virt_addr + virt_size) is the BSS and must be filled with zeros.
        if virt_size > file_size {
            // The rest of the BSS is created as an anonymous vmo.
            let bss_vmo_start = util::page_end(file_size as usize);
            let bss_vmo_size = util::page_end(virt_size as usize) - bss_vmo_start;
            if bss_vmo_size > 0 {
                let anon_vmo =
                    zx::Vmo::create(bss_vmo_size as u64).map_err(|s| ElfLoadError::VmoCreate(s))?;
                anon_vmo
                    .set_name(&vmo_name_with_prefix(&vmo_name, VMO_NAME_PREFIX_BSS))
                    .map_err(ElfLoadError::SetVmoName)?;
                mapper
                    .map(virt_addr + bss_vmo_start, &anon_vmo, 0, bss_vmo_size, flags)
                    .map_err(ElfLoadError::VmarMap)?;
            }
        }
    }
    Ok(())
}

// These must not be longer than zx::sys::ZX_MAX_NAME_LEN.
/// Name prefix applied to the anonymous VMOs created for BSS regions.
const VMO_NAME_PREFIX_BSS: &str = "bss:";
/// Name prefix applied to writable CoW clones of the ELF VMO.
const VMO_NAME_PREFIX_DATA: &str = "data:";

263// prefix length must be less than zx::sys::ZX_MAX_NAME_LEN-1 and not contain any nul bytes.
264fn vmo_name_with_prefix(name: &zx::Name, prefix: &str) -> zx::Name {
265    assert!(prefix.len() <= zx::sys::ZX_MAX_NAME_LEN - 1);
266    if name.is_empty() {
267        zx::Name::new_lossy(&format!("{prefix}<unknown ELF>"))
268    } else {
269        zx::Name::new_lossy(&format!("{prefix}{name}"))
270    }
271}
272
273fn elf_to_vmar_can_map_flags(elf_flags: &elf_parse::SegmentFlags) -> zx::VmarFlags {
274    let mut flags = zx::VmarFlags::empty();
275    if elf_flags.contains(elf_parse::SegmentFlags::READ) {
276        flags |= zx::VmarFlags::CAN_MAP_READ;
277    }
278    if elf_flags.contains(elf_parse::SegmentFlags::WRITE) {
279        flags |= zx::VmarFlags::CAN_MAP_WRITE;
280    }
281    if elf_flags.contains(elf_parse::SegmentFlags::EXECUTE) {
282        flags |= zx::VmarFlags::CAN_MAP_EXECUTE | zx::VmarFlags::CAN_MAP_READ;
283    }
284    flags
285}
286
287fn elf_to_vmar_perm_flags(elf_flags: &elf_parse::SegmentFlags) -> zx::VmarFlags {
288    let mut flags = zx::VmarFlags::empty();
289    if elf_flags.contains(elf_parse::SegmentFlags::READ) {
290        flags |= zx::VmarFlags::PERM_READ;
291    }
292    if elf_flags.contains(elf_parse::SegmentFlags::WRITE) {
293        flags |= zx::VmarFlags::PERM_WRITE;
294    }
295    if elf_flags.contains(elf_parse::SegmentFlags::EXECUTE) {
296        flags |= zx::VmarFlags::PERM_EXECUTE | zx::VmarFlags::PERM_READ_IF_XOM_UNSUPPORTED;
297    }
298    flags
299}
300
#[cfg(test)]
mod tests {
    use super::*;
    use assert_matches::assert_matches;
    use fidl::HandleBased;
    use std::cell::RefCell;
    use std::mem::size_of;
    use std::sync::LazyLock;

    /// Verifies prefixing and truncation behavior of `vmo_name_with_prefix`: results are capped
    /// at the VMO name limit and an unnamed VMO gets the `<unknown ELF>` placeholder.
    #[test]
    fn test_vmo_name_with_prefix() {
        let empty_vmo_name = zx::Name::default();
        let short_vmo_name = zx::Name::new("short_vmo_name").unwrap();
        let max_vmo_name = zx::Name::new("a_great_maximum_length_vmo_name").unwrap();

        assert_eq!(vmo_name_with_prefix(&empty_vmo_name, VMO_NAME_PREFIX_BSS), "bss:<unknown ELF>");
        assert_eq!(
            vmo_name_with_prefix(&short_vmo_name, VMO_NAME_PREFIX_BSS),
            "bss:short_vmo_name",
        );
        // Maximum-length names are truncated to make room for the prefix.
        assert_eq!(
            vmo_name_with_prefix(&max_vmo_name, VMO_NAME_PREFIX_BSS),
            "bss:a_great_maximum_length_vmo_",
        );
        assert_eq!(
            vmo_name_with_prefix(&max_vmo_name, VMO_NAME_PREFIX_DATA),
            "data:a_great_maximum_length_vmo",
        );

        // Long prefixes also truncate the combined result.
        assert_eq!(
            vmo_name_with_prefix(&empty_vmo_name, "a_long_vmo_name_prefix:"),
            "a_long_vmo_name_prefix:<unknown",
        );
        assert_eq!(
            vmo_name_with_prefix(&empty_vmo_name, "a_great_maximum_length_vmo_name"),
            max_vmo_name,
        );
        assert_eq!(
            vmo_name_with_prefix(&max_vmo_name, "anystringhere"),
            "anystringherea_great_maximum_le"
        );
    }

    /// A single `Mapper::map` request captured by `TrackingMapper`.
    #[derive(Debug)]
    struct RecordedMapping {
        // Duplicated handle to the VMO passed to `map`.
        vmo: zx::Vmo,
        // Offset into `vmo` at which the mapping starts.
        vmo_offset: u64,
        // Length of the requested mapping, in bytes.
        length: usize,
        // VMAR flags requested for the mapping.
        flags: zx::VmarFlags,
    }

    /// Records which VMOs and the offset within them are to be mapped.
    ///
    /// Wraps the record in a `RefCell` because the `Mapper` trait's `map` takes `&self`.
    struct TrackingMapper(RefCell<Vec<RecordedMapping>>);

    impl TrackingMapper {
        /// Creates a mapper with an empty record of mappings.
        fn new() -> Self {
            Self(RefCell::new(Vec::new()))
        }
    }

    impl IntoIterator for TrackingMapper {
        type Item = RecordedMapping;
        type IntoIter = std::vec::IntoIter<Self::Item>;

        // Consumes the mapper, yielding recorded mappings in the order `map` was called.
        fn into_iter(self) -> Self::IntoIter {
            self.0.into_inner().into_iter()
        }
    }

    impl Mapper for TrackingMapper {
        fn map(
            &self,
            vmar_offset: usize,
            vmo: &zx::Vmo,
            vmo_offset: u64,
            length: usize,
            flags: zx::VmarFlags,
        ) -> Result<usize, zx::Status> {
            // Record the request (duplicating the VMO handle so the record stays valid) and
            // report the mapping as placed exactly where it was requested.
            self.0.borrow_mut().push(RecordedMapping {
                vmo: vmo.duplicate(zx::Rights::SAME_RIGHTS).unwrap(),
                vmo_offset,
                length,
                flags,
            });
            Ok(vmar_offset)
        }
    }

    /// A basic ELF64 File header with one program header.
    const ELF_FILE_HEADER: &elf_parse::Elf64FileHeader = &elf_parse::Elf64FileHeader {
        ident: elf_parse::ElfIdent {
            magic: elf_parse::ELF_MAGIC,
            class: elf_parse::ElfClass::Elf64 as u8,
            data: elf_parse::NATIVE_ENCODING as u8,
            version: elf_parse::ElfVersion::Current as u8,
            osabi: 0x00,
            abiversion: 0x00,
            pad: [0; 7],
        },
        elf_type: elf_parse::ElfType::SharedObject as u16,
        machine: elf_parse::CURRENT_ARCH as u16,
        version: elf_parse::ElfVersion::Current as u32,
        entry: 0x10000,
        // The single program header immediately follows the file header.
        phoff: size_of::<elf_parse::Elf64FileHeader>(),
        shoff: 0,
        flags: 0,
        ehsize: size_of::<elf_parse::Elf64FileHeader>() as u16,
        phentsize: size_of::<elf_parse::Elf64ProgramHeader>() as u16,
        phnum: 1,
        // No section headers are needed for loading.
        shentsize: 0,
        shnum: 0,
        shstrndx: 0,
    };

    // The bitwise `|` operator for `bitflags` is implemented through the `std::ops::BitOr` trait,
    // which cannot be used in a const context. The workaround is to bitwise OR the raw bits.
    // NOTE(review): assumed to match the rights on a newly created VMO handle — tests subtract
    // WRITE from this set to simulate a read-only ELF VMO.
    const VMO_DEFAULT_RIGHTS: zx::Rights = zx::Rights::from_bits_truncate(
        zx::Rights::DUPLICATE.bits()
            | zx::Rights::TRANSFER.bits()
            | zx::Rights::READ.bits()
            | zx::Rights::WRITE.bits()
            | zx::Rights::MAP.bits()
            | zx::Rights::GET_PROPERTY.bits()
            | zx::Rights::SET_PROPERTY.bits(),
    );

    /// Verifies that a read-only VMO with a BSS that does not end on a page boundary is mapped
    /// via a CoW clone whose trailing partial page has been zeroed.
    #[test]
    fn map_read_only_with_page_unaligned_bss() {
        const ELF_DATA: &[u8; 8] = b"FUCHSIA!";

        // Contains a PT_LOAD segment where the filesz is less than memsz (BSS).
        static PAGE_SIZE: LazyLock<usize> = LazyLock::new(|| zx::system_get_page_size() as usize);
        static ELF_PROGRAM_HEADER: LazyLock<elf_parse::Elf64ProgramHeader> =
            LazyLock::new(|| elf_parse::Elf64ProgramHeader {
                segment_type: elf_parse::SegmentType::Load as u32,
                flags: elf_parse::SegmentFlags::from_bits_truncate(
                    elf_parse::SegmentFlags::READ.bits() | elf_parse::SegmentFlags::EXECUTE.bits(),
                )
                .bits(),
                offset: *PAGE_SIZE,
                vaddr: 0x10000,
                paddr: 0x10000,
                filesz: ELF_DATA.len() as u64,
                memsz: 0x100,
                align: *PAGE_SIZE as u64,
            });
        let headers = elf_parse::Elf64Headers::new_for_test(
            ELF_FILE_HEADER,
            Some(std::slice::from_ref(&ELF_PROGRAM_HEADER)),
        );
        let vmo = zx::Vmo::create(*PAGE_SIZE as u64 * 2).expect("create VMO");

        // Fill the VMO with 0xff, so that we can verify that the BSS section is correctly zeroed.
        let data = vec![0xff; *PAGE_SIZE * 2];
        vmo.write(&data, 0).expect("fill VMO with 0xff");
        // Write the PT_LOAD segment's data at the defined offset.
        vmo.write(ELF_DATA, *PAGE_SIZE as u64).expect("write data to VMO");

        // Remove the ZX_RIGHT_WRITE right. Page zeroing should happen in a COW VMO.
        let vmo =
            vmo.replace_handle(VMO_DEFAULT_RIGHTS - zx::Rights::WRITE).expect("remove WRITE right");

        let mapper = TrackingMapper::new();
        map_elf_segments(&vmo, &headers, &mapper, 0, 0).expect("map ELF segments");

        let mut mapping_iter = mapper.into_iter();

        // Extract the VMO and offset that was supposed to be mapped.
        let mapping = mapping_iter.next().expect("mapping from ELF VMO");

        // Read a page of data that was "mapped".
        let mut data = vec![0; *PAGE_SIZE];
        mapping.vmo.read(&mut data, mapping.vmo_offset).expect("read VMO");

        // Construct the expected memory, which is ASCII "FUCHSIA!" followed by 0s for the rest of
        // the page.
        let expected = ELF_DATA
            .into_iter()
            .cloned()
            .chain(std::iter::repeat(0).take(*PAGE_SIZE - ELF_DATA.len()))
            .collect::<Vec<u8>>();

        assert_eq!(&expected, &data);

        // No more mappings expected.
        assert_matches!(mapping_iter.next(), None);
    }

    /// Verifies that when the BSS begins exactly at a page boundary, the original (read-only) VMO
    /// is mapped directly and the BSS comes from a separate anonymous zero-filled VMO.
    #[test]
    fn map_read_only_vmo_with_page_aligned_bss() {
        // Contains a PT_LOAD segment where the BSS starts at a page boundary.
        static PAGE_SIZE: LazyLock<usize> = LazyLock::new(|| zx::system_get_page_size() as usize);
        static ELF_PROGRAM_HEADER: LazyLock<elf_parse::Elf64ProgramHeader> =
            LazyLock::new(|| elf_parse::Elf64ProgramHeader {
                segment_type: elf_parse::SegmentType::Load as u32,
                flags: elf_parse::SegmentFlags::from_bits_truncate(
                    elf_parse::SegmentFlags::READ.bits() | elf_parse::SegmentFlags::EXECUTE.bits(),
                )
                .bits(),
                offset: *PAGE_SIZE,
                vaddr: 0x10000,
                paddr: 0x10000,
                filesz: *PAGE_SIZE as u64,
                memsz: *PAGE_SIZE as u64 * 2,
                align: *PAGE_SIZE as u64,
            });
        let headers = elf_parse::Elf64Headers::new_for_test(
            ELF_FILE_HEADER,
            Some(std::slice::from_ref(&ELF_PROGRAM_HEADER)),
        );
        let vmo = zx::Vmo::create(*PAGE_SIZE as u64 * 2).expect("create VMO");
        // Fill the VMO with 0xff, so we can verify the BSS section is correctly allocated.
        let pattern = vec![0xff; *PAGE_SIZE * 2];
        vmo.write(&pattern, 0).expect("fill VMO with 0xff");

        // Remove the ZX_RIGHT_WRITE right. Since the BSS ends at a page boundary, we shouldn't
        // need to zero out any of the pages in this VMO.
        let vmo =
            vmo.replace_handle(VMO_DEFAULT_RIGHTS - zx::Rights::WRITE).expect("remove WRITE right");

        let mapper = TrackingMapper::new();
        map_elf_segments(&vmo, &headers, &mapper, 0, 0).expect("map ELF segments");

        let mut mapping_iter = mapper.into_iter();

        // Verify that a COW VMO was not created, since we didn't need to write to the original VMO.
        // We must check that KOIDs are the same, since we duplicate the handle when recording it
        // in TrackingMapper.
        let mapping = mapping_iter.next().expect("mapping from ELF VMO");
        assert_eq!(mapping.vmo.koid().unwrap(), vmo.koid().unwrap());

        let mut data = vec![0; *PAGE_SIZE];

        // Ensure the first page is from the ELF.
        mapping.vmo.read(&mut data, mapping.vmo_offset).expect("read ELF VMO");
        assert_eq!(&data, &pattern[0..*PAGE_SIZE]);

        let mapping = mapping_iter.next().expect("mapping from BSS VMO");

        // Ensure the second page is BSS.
        mapping.vmo.read(&mut data, mapping.vmo_offset).expect("read BSS VMO");
        let zero = vec![0; *PAGE_SIZE];
        assert_eq!(&data, &zero);

        // No more mappings expected.
        assert_matches!(mapping_iter.next(), None);
    }

    /// Verifies that when filesz == memsz (no BSS), the read-only VMO is mapped directly with no
    /// CoW clone and no BSS mapping.
    #[test]
    fn map_read_only_vmo_with_no_bss() {
        // Contains a PT_LOAD segment where there is no BSS.
        static PAGE_SIZE: LazyLock<usize> = LazyLock::new(|| zx::system_get_page_size() as usize);
        static ELF_PROGRAM_HEADER: LazyLock<elf_parse::Elf64ProgramHeader> =
            LazyLock::new(|| elf_parse::Elf64ProgramHeader {
                segment_type: elf_parse::SegmentType::Load as u32,
                flags: elf_parse::SegmentFlags::from_bits_truncate(
                    elf_parse::SegmentFlags::READ.bits() | elf_parse::SegmentFlags::EXECUTE.bits(),
                )
                .bits(),
                offset: *PAGE_SIZE,
                vaddr: 0x10000,
                paddr: 0x10000,
                filesz: *PAGE_SIZE as u64,
                memsz: *PAGE_SIZE as u64,
                align: *PAGE_SIZE as u64,
            });
        let headers = elf_parse::Elf64Headers::new_for_test(
            ELF_FILE_HEADER,
            Some(std::slice::from_ref(&ELF_PROGRAM_HEADER)),
        );
        let vmo = zx::Vmo::create(*PAGE_SIZE as u64 * 2).expect("create VMO");
        // Fill the VMO with 0xff, so we can verify the mapped content comes from the file.
        let pattern = vec![0xff; *PAGE_SIZE * 2];
        vmo.write(&pattern, 0).expect("fill VMO with 0xff");

        // Remove the ZX_RIGHT_WRITE right. Since there is no BSS, we shouldn't need to zero out
        // any of the pages in this VMO.
        let vmo =
            vmo.replace_handle(VMO_DEFAULT_RIGHTS - zx::Rights::WRITE).expect("remove WRITE right");

        let mapper = TrackingMapper::new();
        map_elf_segments(&vmo, &headers, &mapper, 0, 0).expect("map ELF segments");

        let mut mapping_iter = mapper.into_iter();

        // Verify that a COW VMO was not created, since we didn't need to write to the original VMO.
        // We must check that KOIDs are the same, since we duplicate the handle when recording it
        // in TrackingMapper.
        let mapping = mapping_iter.next().expect("mapping from ELF VMO");
        assert_eq!(mapping.vmo.koid().unwrap(), vmo.koid().unwrap());

        let mut data = vec![0; *PAGE_SIZE];

        // Ensure the first page is from the ELF.
        mapping.vmo.read(&mut data, mapping.vmo_offset).expect("read ELF VMO");
        assert_eq!(&data, &pattern[0..*PAGE_SIZE]);

        // No more mappings expected.
        assert_matches!(mapping_iter.next(), None);
    }

    /// Verifies that a segment with the WRITE flag is mapped from a writable CoW clone of the
    /// (read-only) ELF VMO rather than from the original VMO itself.
    #[test]
    fn map_read_only_vmo_with_write_flag() {
        // Contains a PT_LOAD segment where there is no BSS.
        static PAGE_SIZE: LazyLock<usize> = LazyLock::new(|| zx::system_get_page_size() as usize);
        static ELF_PROGRAM_HEADER: LazyLock<elf_parse::Elf64ProgramHeader> =
            LazyLock::new(|| elf_parse::Elf64ProgramHeader {
                segment_type: elf_parse::SegmentType::Load as u32,
                flags: elf_parse::SegmentFlags::from_bits_truncate(
                    elf_parse::SegmentFlags::READ.bits() | elf_parse::SegmentFlags::WRITE.bits(),
                )
                .bits(),
                offset: *PAGE_SIZE,
                vaddr: 0x10000,
                paddr: 0x10000,
                filesz: *PAGE_SIZE as u64,
                memsz: *PAGE_SIZE as u64,
                align: *PAGE_SIZE as u64,
            });
        let headers = elf_parse::Elf64Headers::new_for_test(
            ELF_FILE_HEADER,
            Some(std::slice::from_ref(&ELF_PROGRAM_HEADER)),
        );
        let vmo = zx::Vmo::create(*PAGE_SIZE as u64 * 2).expect("create VMO");

        // Remove the ZX_RIGHT_WRITE right. Since the segment has a WRITE flag, a COW child VMO
        // will be created.
        let vmo =
            vmo.replace_handle(VMO_DEFAULT_RIGHTS - zx::Rights::WRITE).expect("remove WRITE right");

        let mapper = TrackingMapper::new();
        map_elf_segments(&vmo, &headers, &mapper, 0, 0).expect("map ELF segments");

        let mut mapping_iter = mapper.into_iter();

        // Verify that a COW VMO was created, since the segment had a WRITE flag.
        // We must check that KOIDs are different, since we duplicate the handle when recording it
        // in TrackingMapper.
        let mapping = mapping_iter.next().expect("mapping from ELF VMO");
        assert_ne!(mapping.vmo.koid().unwrap(), vmo.koid().unwrap());

        // Attempt to write to the VMO to ensure it has the ZX_RIGHT_WRITE right.
        mapping.vmo.write(b"FUCHSIA!", mapping.vmo_offset).expect("write to COW VMO");

        // No more mappings expected.
        assert_matches!(mapping_iter.next(), None);
    }

    /// Verifies that a segment with filesz == 0 never produces a zero-length map call.
    #[test]
    fn segment_with_zero_file_size() {
        // Contains a PT_LOAD segment whose filesz is 0.
        static PAGE_SIZE: LazyLock<usize> = LazyLock::new(|| zx::system_get_page_size() as usize);
        static ELF_PROGRAM_HEADER: LazyLock<elf_parse::Elf64ProgramHeader> =
            LazyLock::new(|| elf_parse::Elf64ProgramHeader {
                segment_type: elf_parse::SegmentType::Load as u32,
                flags: elf_parse::SegmentFlags::from_bits_truncate(
                    elf_parse::SegmentFlags::READ.bits() | elf_parse::SegmentFlags::WRITE.bits(),
                )
                .bits(),
                offset: *PAGE_SIZE,
                vaddr: 0x10000,
                paddr: 0x10000,
                filesz: 0,
                memsz: 1,
                align: *PAGE_SIZE as u64,
            });
        let headers = elf_parse::Elf64Headers::new_for_test(
            ELF_FILE_HEADER,
            Some(std::slice::from_ref(&ELF_PROGRAM_HEADER)),
        );
        let vmo = zx::Vmo::create(*PAGE_SIZE as u64 * 2).expect("create VMO");

        let mapper = TrackingMapper::new();
        map_elf_segments(&vmo, &headers, &mapper, 0, 0).expect("map ELF segments");
        // Every recorded mapping must have a non-zero length.
        for mapping in mapper.into_iter() {
            assert!(mapping.length != 0);
        }
    }

680    #[test]
681    fn map_execute_only_segment() {
682        static PAGE_SIZE: LazyLock<usize> = LazyLock::new(|| zx::system_get_page_size() as usize);
683        static ELF_PROGRAM_HEADER: LazyLock<elf_parse::Elf64ProgramHeader> =
684            LazyLock::new(|| elf_parse::Elf64ProgramHeader {
685                segment_type: elf_parse::SegmentType::Load as u32,
686                flags: elf_parse::SegmentFlags::from_bits_truncate(
687                    elf_parse::SegmentFlags::EXECUTE.bits(),
688                )
689                .bits(),
690                offset: *PAGE_SIZE,
691                vaddr: 0x10000,
692                paddr: 0x10000,
693                filesz: 0x10,
694                memsz: 0x10,
695                align: *PAGE_SIZE as u64,
696            });
697        let headers = elf_parse::Elf64Headers::new_for_test(
698            ELF_FILE_HEADER,
699            Some(std::slice::from_ref(&ELF_PROGRAM_HEADER)),
700        );
701        let vmo = zx::Vmo::create(*PAGE_SIZE as u64 * 2).expect("create VMO");
702
703        let mapper = TrackingMapper::new();
704        map_elf_segments(&vmo, &headers, &mapper, 0, 0).expect("map ELF segments");
705
706        let mut mapping_iter = mapper.into_iter();
707        let mapping = mapping_iter.next().expect("mapping from ELF VMO");
708        assert_eq!(
709            mapping.flags,
710            zx::VmarFlags::SPECIFIC
711                | zx::VmarFlags::ALLOW_FAULTS
712                | zx::VmarFlags::PERM_EXECUTE
713                | zx::VmarFlags::PERM_READ_IF_XOM_UNSUPPORTED
714        );
715
716        // No more mappings expected.
717        assert_matches!(mapping_iter.next(), None);
718    }
719}