process_builder/elf_load.rs

// Copyright 2019 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

//! Utilities for loading ELF files into an existing address space.

use crate::{elf_parse as elf, util};
use thiserror::Error;
use zx::{self as zx, AsHandleRef};

/// Possible errors that can occur during ELF loading.
#[derive(Error, Debug)]
pub enum ElfLoadError {
    #[error("ELF load segments were empty")]
    NothingToLoad,
    #[error("Failed to allocate VMAR for ELF: {}", _0)]
    VmarAllocate(zx::Status),
    #[error("Failed to map VMAR: {}", _0)]
    VmarMap(zx::Status),
    #[error("Failed to create CoW VMO clone: {}", _0)]
    VmoCowClone(zx::Status),
    #[error("Failed to create VMO: {}", _0)]
    VmoCreate(zx::Status),
    #[error("Failed to read from VMO: {}", _0)]
    VmoRead(zx::Status),
    #[error("Failed to write to VMO: {}", _0)]
    VmoWrite(zx::Status),
    #[error("Failed to get VMO name: {}", _0)]
    GetVmoName(zx::Status),
    #[error("Failed to set VMO name: {}", _0)]
    SetVmoName(zx::Status),
}

impl ElfLoadError {
    /// Returns an appropriate zx::Status code for the given error.
    pub fn as_zx_status(&self) -> zx::Status {
        match self {
            ElfLoadError::NothingToLoad => zx::Status::NOT_FOUND,
            ElfLoadError::VmarAllocate(s)
            | ElfLoadError::VmarMap(s)
            | ElfLoadError::VmoCowClone(s)
            | ElfLoadError::VmoCreate(s)
            | ElfLoadError::VmoRead(s)
            | ElfLoadError::VmoWrite(s)
            | ElfLoadError::GetVmoName(s)
            | ElfLoadError::SetVmoName(s) => *s,
        }
    }
}

/// Information on what an ELF requires of its address space.
#[derive(Debug)]
pub struct LoadedElfInfo {
    /// The lowest address of the loaded ELF.
    pub low: usize,

    /// The highest address of the loaded ELF.
    pub high: usize,

    /// Union of all address space permissions required to load the ELF.
    pub max_perm: elf::SegmentFlags,
}

/// Returns the address space requirements to load this ELF. Attempting to load it into a VMAR with
/// permissions less than max_perm, or at a base such that the range [base+low, base+high] is not
/// entirely valid, will fail.
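///
/// # Example
///
/// A minimal sketch of computing placement requirements, assuming `headers` was already parsed
/// (e.g. via `elf_parse::Elf64Headers`); not compiled here:
///
/// ```ignore
/// let info = loaded_elf_info(&headers);
/// // The containing VMAR must span at least this many bytes.
/// let size = info.high - info.low;
/// ```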
pub fn loaded_elf_info(headers: &elf::Elf64Headers) -> LoadedElfInfo {
    let (mut first, mut low, mut high) = (true, 0, 0);
    let mut max_perm = elf::SegmentFlags::empty();
    for hdr in headers.program_headers_with_type(elf::SegmentType::Load) {
        // elf_parse already checked that segments are ordered by vaddr and do not overlap.
        if first {
            low = util::page_start(hdr.vaddr);
            first = false;
        }
        high = util::page_end(hdr.vaddr + hdr.memsz as usize);
        max_perm |= hdr.flags();
    }
    LoadedElfInfo { low, high, max_perm }
}

/// Return value of load_elf.
#[derive(Debug)]
pub struct LoadedElf {
    /// The VMAR that the ELF file was loaded into.
    pub vmar: zx::Vmar,

    /// The virtual address of the VMAR.
    pub vmar_base: usize,

    /// The ELF entry point, adjusted for the base address of the VMAR.
    pub entry: usize,
}

/// A trait so that callers of map_elf_segments can hook the map operation.
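///
/// # Example
///
/// A sketch of a custom implementation that pretends every mapping succeeds (the
/// `TrackingMapper` in the tests below is a fuller version of this idea); `NoopMapper` is a
/// hypothetical name, and the block is not compiled here:
///
/// ```ignore
/// struct NoopMapper;
///
/// impl Mapper for NoopMapper {
///     fn map(
///         &self,
///         vmar_offset: usize,
///         _vmo: &zx::Vmo,
///         _vmo_offset: u64,
///         _length: usize,
///         _flags: zx::VmarFlags,
///     ) -> Result<usize, zx::Status> {
///         // Report success at the requested offset without touching the address space.
///         Ok(vmar_offset)
///     }
/// }
/// ```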
pub trait Mapper {
    /// Map memory from the given VMO at the specified location.
    ///
    /// See zx::Vmar::map for more details.
    fn map(
        &self,
        vmar_offset: usize,
        vmo: &zx::Vmo,
        vmo_offset: u64,
        length: usize,
        flags: zx::VmarFlags,
    ) -> Result<usize, zx::Status>;
}

impl Mapper for zx::Vmar {
    fn map(
        &self,
        vmar_offset: usize,
        vmo: &zx::Vmo,
        vmo_offset: u64,
        length: usize,
        flags: zx::VmarFlags,
    ) -> Result<usize, zx::Status> {
        Self::map(self, vmar_offset, vmo, vmo_offset, length, flags)
    }
}

/// Load an ELF into a new sub-VMAR of the specified root.
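///
/// # Example
///
/// A minimal end-to-end sketch, assuming `vmo` holds an ELF image; the parsing entry point and
/// `fuchsia_runtime::vmar_root_self()` are assumptions here, and the block is not compiled:
///
/// ```ignore
/// let headers = elf::Elf64Headers::from_vmo(&vmo)?;
/// let loaded = load_elf(&vmo, &headers, &fuchsia_runtime::vmar_root_self())?;
/// // `loaded.entry` is already adjusted by the VMAR's base address.
/// ```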
pub fn load_elf(
    vmo: &zx::Vmo,
    headers: &elf::Elf64Headers,
    root_vmar: &zx::Vmar,
) -> Result<LoadedElf, ElfLoadError> {
    let info = loaded_elf_info(headers);
    let size = info.high - info.low;
    if size == 0 {
        return Err(ElfLoadError::NothingToLoad);
    }

    // Individual mappings will be restricted based on segment permissions, but we also limit the
    // overall VMAR to the maximum permissions required across all load segments.
    let flags = zx::VmarFlags::CAN_MAP_SPECIFIC | elf_to_vmar_can_map_flags(&info.max_perm);
    let (vmar, vmar_base) =
        root_vmar.allocate(0, size, flags).map_err(ElfLoadError::VmarAllocate)?;

    // Compute the bias between p_vaddr values in the headers and the addresses at which segments
    // are actually loaded. Relative to the allocated VMAR, this is the first segment's starting
    // vaddr negated, so that the first mapping starts at offset 0 within the allocated VMAR.
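    //
    // A worked example with hypothetical numbers: if info.low == 0x10000 and the kernel places
    // the VMAR at vmar_base == 0x3f000000, then vaddr_bias == 0x3eff0000, and a segment with
    // p_vaddr == 0x10000 loads at absolute address 0x3f000000, i.e. offset 0 within the VMAR.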
    let vaddr_bias = vmar_base.wrapping_sub(info.low);

    map_elf_segments(vmo, headers, &vmar, vmar_base, vaddr_bias)?;
    Ok(LoadedElf { vmar, vmar_base, entry: headers.file_header().entry.wrapping_add(vaddr_bias) })
}

/// Map the segments of an ELF into an existing VMAR.
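///
/// `mapper_base` is the base address of the region that `mapper` maps into, and `vaddr_bias` is
/// the offset added to p_vaddr values to produce absolute load addresses. `load_elf` above passes
/// the allocated VMAR, its base, and `vmar_base - info.low` respectively.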
pub fn map_elf_segments(
    vmo: &zx::Vmo,
    headers: &elf::Elf64Headers,
    mapper: &dyn Mapper,
    mapper_base: usize,
    vaddr_bias: usize,
) -> Result<(), ElfLoadError> {
    // We intentionally use wrapping subtraction here, in case the ELF file happens to use vaddrs
    // that are higher than the VMAR base chosen by the kernel. Wrapping addition will be used
    // when adding this bias to vaddr values.
    //
    // For arch32 entries, the caller must ensure that the relative bias will not underflow.
    // Beyond that, no virtual offset supplied by an ELF32 header is able to overflow 64-bit
    // addition, and if the mapping lands outside of addressable user memory, the
    // Mapper/MemoryManager will disallow the mapping.
    let mapper_relative_bias = vaddr_bias.wrapping_sub(mapper_base);
    let vmo_name = vmo.get_name().map_err(ElfLoadError::GetVmoName)?;
    for hdr in headers.program_headers_with_type(elf::SegmentType::Load) {
        // Shift the start of the mapping down to the nearest page boundary.
        let adjust = util::page_offset(hdr.offset);
        let mut file_offset = hdr.offset - adjust;
        let file_size = hdr.filesz + adjust as u64;
        let virt_offset = hdr.vaddr - adjust;
        let virt_size = hdr.memsz + adjust as u64;

        // Calculate the virtual address range that this mapping needs to cover. These addresses
        // are relative to the allocated VMAR, not the root VMAR.
        let virt_addr = virt_offset.wrapping_add(mapper_relative_bias);

        // If the segment is specified as larger than the data in the file, and the data in the
        // file does not end at a page boundary, we will need to zero out the remaining memory in
        // the page.
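        // For example (hypothetical numbers, 4KiB pages): filesz == 8 and memsz == 0x100 leaves
        // 0xff8 bytes after the file data in the first mapped page, which must be zeroed in a
        // writable copy rather than exposing whatever follows the segment in the file.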
        let must_write = virt_size > file_size && util::page_offset(file_size as usize) != 0;

        // If this segment is writeable (and we're mapping in some VMO content, i.e. it's not
        // all zero initialized) or the segment has a BSS section that needs to be zeroed, create
        // a writeable clone of the VMO. Otherwise use the potentially read-only VMO passed in.
        let vmo_to_map: &zx::Vmo;
        let writeable_vmo: zx::Vmo;
        if must_write || (file_size > 0 && hdr.flags().contains(elf::SegmentFlags::WRITE)) {
            writeable_vmo = vmo
                .create_child(
                    zx::VmoChildOptions::SNAPSHOT_AT_LEAST_ON_WRITE,
                    file_offset as u64,
                    util::page_end(file_size as usize) as u64,
                )
                .map_err(ElfLoadError::VmoCowClone)?;
            writeable_vmo
                .set_name(&vmo_name_with_prefix(&vmo_name, VMO_NAME_PREFIX_DATA))
                .map_err(ElfLoadError::SetVmoName)?;
            // The clone starts at the segment's page-aligned file offset, so the offset used for
            // the mapping becomes 0.
            file_offset = 0;

            // Zero out the memory between the end of the file data and the end of the page.
            if virt_size > file_size {
                // If the space to be zero-filled overlaps with the VMO, we need to memset it.
                let memset_size = util::page_end(file_size as usize) - file_size as usize;
                if memset_size > 0 {
                    writeable_vmo
                        .write(&vec![0u8; memset_size], file_size)
                        .map_err(ElfLoadError::VmoWrite)?;
                }
            }
            vmo_to_map = &writeable_vmo;
        } else {
            vmo_to_map = vmo;
        }

        // Create the VMO part of the mapping.
        // The VMO can be pager-backed, so include the ALLOW_FAULTS flag. ALLOW_FAULTS is a no-op
        // if not applicable to the VMO type.
        let flags = zx::VmarFlags::SPECIFIC
            | zx::VmarFlags::ALLOW_FAULTS
            | elf_to_vmar_perm_flags(&hdr.flags());
        if file_size != 0 {
            mapper
                .map(
                    virt_addr,
                    vmo_to_map,
                    file_offset as u64,
                    util::page_end(file_size as usize),
                    flags,
                )
                .map_err(ElfLoadError::VmarMap)?;
        }

        // If the mapping is specified as larger than the data in the file (i.e. virt_size is
        // larger than file_size), the remainder of the space (from virt_addr + file_size to
        // virt_addr + virt_size) is the BSS and must be filled with zeros.
        if virt_size > file_size {
            // The rest of the BSS is created as an anonymous VMO.
            let bss_vmo_start = util::page_end(file_size as usize);
            let bss_vmo_size = util::page_end(virt_size as usize) - bss_vmo_start;
            if bss_vmo_size > 0 {
                let anon_vmo =
                    zx::Vmo::create(bss_vmo_size as u64).map_err(ElfLoadError::VmoCreate)?;
                anon_vmo
                    .set_name(&vmo_name_with_prefix(&vmo_name, VMO_NAME_PREFIX_BSS))
                    .map_err(ElfLoadError::SetVmoName)?;
                mapper
                    .map(virt_addr + bss_vmo_start, &anon_vmo, 0, bss_vmo_size, flags)
                    .map_err(ElfLoadError::VmarMap)?;
            }
        }
    }
    Ok(())
}

// These must not be longer than zx::sys::ZX_MAX_NAME_LEN.
const VMO_NAME_PREFIX_BSS: &str = "bss:";
const VMO_NAME_PREFIX_DATA: &str = "data:";

// The prefix must be at most zx::sys::ZX_MAX_NAME_LEN - 1 bytes and must not contain any nul
// bytes.
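// For example (hypothetical name): prefixing "libfoo.so" with "data:" yields "data:libfoo.so",
// and any result longer than the limit is truncated by zx::Name::new_lossy, as the tests below
// demonstrate.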
fn vmo_name_with_prefix(name: &zx::Name, prefix: &str) -> zx::Name {
    assert!(prefix.len() <= zx::sys::ZX_MAX_NAME_LEN - 1);
    if name.is_empty() {
        zx::Name::new_lossy(&format!("{prefix}<unknown ELF>"))
    } else {
        zx::Name::new_lossy(&format!("{prefix}{name}"))
    }
}

fn elf_to_vmar_can_map_flags(elf_flags: &elf::SegmentFlags) -> zx::VmarFlags {
    let mut flags = zx::VmarFlags::empty();
    if elf_flags.contains(elf::SegmentFlags::READ) {
        flags |= zx::VmarFlags::CAN_MAP_READ;
    }
    if elf_flags.contains(elf::SegmentFlags::WRITE) {
        flags |= zx::VmarFlags::CAN_MAP_WRITE;
    }
    if elf_flags.contains(elf::SegmentFlags::EXECUTE) {
        flags |= zx::VmarFlags::CAN_MAP_EXECUTE | zx::VmarFlags::CAN_MAP_READ;
    }
    flags
}

fn elf_to_vmar_perm_flags(elf_flags: &elf::SegmentFlags) -> zx::VmarFlags {
    let mut flags = zx::VmarFlags::empty();
    if elf_flags.contains(elf::SegmentFlags::READ) {
        flags |= zx::VmarFlags::PERM_READ;
    }
    if elf_flags.contains(elf::SegmentFlags::WRITE) {
        flags |= zx::VmarFlags::PERM_WRITE;
    }
    if elf_flags.contains(elf::SegmentFlags::EXECUTE) {
        flags |= zx::VmarFlags::PERM_EXECUTE | zx::VmarFlags::PERM_READ_IF_XOM_UNSUPPORTED;
    }
    flags
}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::elf_parse;
    use assert_matches::assert_matches;
    use fidl::HandleBased;
    use lazy_static::lazy_static;
    use std::cell::RefCell;
    use std::mem::size_of;

    #[test]
    fn test_vmo_name_with_prefix() {
        let empty_vmo_name = zx::Name::default();
        let short_vmo_name = zx::Name::new("short_vmo_name").unwrap();
        let max_vmo_name = zx::Name::new("a_great_maximum_length_vmo_name").unwrap();

        assert_eq!(vmo_name_with_prefix(&empty_vmo_name, VMO_NAME_PREFIX_BSS), "bss:<unknown ELF>");
        assert_eq!(
            vmo_name_with_prefix(&short_vmo_name, VMO_NAME_PREFIX_BSS),
            "bss:short_vmo_name",
        );
        assert_eq!(
            vmo_name_with_prefix(&max_vmo_name, VMO_NAME_PREFIX_BSS),
            "bss:a_great_maximum_length_vmo_",
        );
        assert_eq!(
            vmo_name_with_prefix(&max_vmo_name, VMO_NAME_PREFIX_DATA),
            "data:a_great_maximum_length_vmo",
        );

        assert_eq!(
            vmo_name_with_prefix(&empty_vmo_name, "a_long_vmo_name_prefix:"),
            "a_long_vmo_name_prefix:<unknown",
        );
        assert_eq!(
            vmo_name_with_prefix(&empty_vmo_name, "a_great_maximum_length_vmo_name"),
            max_vmo_name,
        );
        assert_eq!(
            vmo_name_with_prefix(&max_vmo_name, "anystringhere"),
            "anystringherea_great_maximum_le"
        );
    }

    #[derive(Debug)]
    struct RecordedMapping {
        vmo: zx::Vmo,
        vmo_offset: u64,
        length: usize,
        flags: zx::VmarFlags,
    }

    /// Records which VMOs, and the offsets within them, are to be mapped.
    struct TrackingMapper(RefCell<Vec<RecordedMapping>>);

    impl TrackingMapper {
        fn new() -> Self {
            Self(RefCell::new(Vec::new()))
        }
    }

    impl IntoIterator for TrackingMapper {
        type Item = RecordedMapping;
        type IntoIter = std::vec::IntoIter<Self::Item>;

        fn into_iter(self) -> Self::IntoIter {
            self.0.into_inner().into_iter()
        }
    }

    impl Mapper for TrackingMapper {
        fn map(
            &self,
            vmar_offset: usize,
            vmo: &zx::Vmo,
            vmo_offset: u64,
            length: usize,
            flags: zx::VmarFlags,
        ) -> Result<usize, zx::Status> {
            self.0.borrow_mut().push(RecordedMapping {
                vmo: vmo.as_handle_ref().duplicate(zx::Rights::SAME_RIGHTS).unwrap().into(),
                vmo_offset,
                length,
                flags,
            });
            Ok(vmar_offset)
        }
    }

    /// A basic ELF64 file header with one program header.
    const ELF_FILE_HEADER: &elf_parse::Elf64FileHeader = &elf_parse::Elf64FileHeader {
        ident: elf_parse::ElfIdent {
            magic: elf_parse::ELF_MAGIC,
            class: elf_parse::ElfClass::Elf64 as u8,
            data: elf_parse::NATIVE_ENCODING as u8,
            version: elf_parse::ElfVersion::Current as u8,
            osabi: 0x00,
            abiversion: 0x00,
            pad: [0; 7],
        },
        elf_type: elf_parse::ElfType::SharedObject as u16,
        machine: elf_parse::CURRENT_ARCH as u16,
        version: elf_parse::ElfVersion::Current as u32,
        entry: 0x10000,
        phoff: size_of::<elf_parse::Elf64FileHeader>(),
        shoff: 0,
        flags: 0,
        ehsize: size_of::<elf_parse::Elf64FileHeader>() as u16,
        phentsize: size_of::<elf_parse::Elf64ProgramHeader>() as u16,
        phnum: 1,
        shentsize: 0,
        shnum: 0,
        shstrndx: 0,
    };

    // The bitwise `|` operator for `bitflags` is implemented through the `std::ops::BitOr` trait,
    // which cannot be used in a const context. The workaround is to bitwise OR the raw bits.
    const VMO_DEFAULT_RIGHTS: zx::Rights = zx::Rights::from_bits_truncate(
        zx::Rights::DUPLICATE.bits()
            | zx::Rights::TRANSFER.bits()
            | zx::Rights::READ.bits()
            | zx::Rights::WRITE.bits()
            | zx::Rights::MAP.bits()
            | zx::Rights::GET_PROPERTY.bits()
            | zx::Rights::SET_PROPERTY.bits(),
    );

    #[test]
    fn map_read_only_with_page_unaligned_bss() {
        const ELF_DATA: &[u8; 8] = b"FUCHSIA!";

        // Contains a PT_LOAD segment where the filesz is less than memsz (BSS).
        lazy_static! {
            static ref PAGE_SIZE: usize = zx::system_get_page_size() as usize;
            static ref ELF_PROGRAM_HEADER: elf_parse::Elf64ProgramHeader =
                elf_parse::Elf64ProgramHeader {
                    segment_type: elf_parse::SegmentType::Load as u32,
                    flags: elf_parse::SegmentFlags::from_bits_truncate(
                        elf_parse::SegmentFlags::READ.bits()
                            | elf_parse::SegmentFlags::EXECUTE.bits(),
                    )
                    .bits(),
                    offset: *PAGE_SIZE,
                    vaddr: 0x10000,
                    paddr: 0x10000,
                    filesz: ELF_DATA.len() as u64,
                    memsz: 0x100,
                    align: *PAGE_SIZE as u64,
                };
        }
        let headers = elf_parse::Elf64Headers::new_for_test(
            ELF_FILE_HEADER,
            Some(std::slice::from_ref(&ELF_PROGRAM_HEADER)),
        );
        let vmo = zx::Vmo::create(*PAGE_SIZE as u64 * 2).expect("create VMO");

        // Fill the VMO with 0xff, so that we can verify that the BSS section is correctly zeroed.
        let data = vec![0xff; *PAGE_SIZE * 2];
        vmo.write(&data, 0).expect("fill VMO with 0xff");
        // Write the PT_LOAD segment's data at the defined offset.
        vmo.write(ELF_DATA, *PAGE_SIZE as u64).expect("write data to VMO");

        // Remove the ZX_RIGHT_WRITE right. Page zeroing should happen in a COW VMO.
        let vmo =
            vmo.replace_handle(VMO_DEFAULT_RIGHTS - zx::Rights::WRITE).expect("remove WRITE right");

        let mapper = TrackingMapper::new();
        map_elf_segments(&vmo, &headers, &mapper, 0, 0).expect("map ELF segments");

        let mut mapping_iter = mapper.into_iter();

        // Extract the VMO and offset that was supposed to be mapped.
        let mapping = mapping_iter.next().expect("mapping from ELF VMO");

        // Read a page of data that was "mapped".
        let mut data = vec![0; *PAGE_SIZE];
        mapping.vmo.read(&mut data, mapping.vmo_offset).expect("read VMO");

        // Construct the expected memory, which is ASCII "FUCHSIA!" followed by 0s for the rest of
        // the page.
        let expected = ELF_DATA
            .iter()
            .copied()
            .chain(std::iter::repeat(0).take(*PAGE_SIZE - ELF_DATA.len()))
            .collect::<Vec<u8>>();

        assert_eq!(&expected, &data);

        // No more mappings expected.
        assert_matches!(mapping_iter.next(), None);
    }

    #[test]
    fn map_read_only_vmo_with_page_aligned_bss() {
        // Contains a PT_LOAD segment where the BSS starts at a page boundary.
        lazy_static! {
            static ref PAGE_SIZE: usize = zx::system_get_page_size() as usize;
            static ref ELF_PROGRAM_HEADER: elf_parse::Elf64ProgramHeader =
                elf_parse::Elf64ProgramHeader {
                    segment_type: elf_parse::SegmentType::Load as u32,
                    flags: elf_parse::SegmentFlags::from_bits_truncate(
                        elf_parse::SegmentFlags::READ.bits()
                            | elf_parse::SegmentFlags::EXECUTE.bits(),
                    )
                    .bits(),
                    offset: *PAGE_SIZE,
                    vaddr: 0x10000,
                    paddr: 0x10000,
                    filesz: *PAGE_SIZE as u64,
                    memsz: *PAGE_SIZE as u64 * 2,
                    align: *PAGE_SIZE as u64,
                };
        }
        let headers = elf_parse::Elf64Headers::new_for_test(
            ELF_FILE_HEADER,
            Some(std::slice::from_ref(&ELF_PROGRAM_HEADER)),
        );
        let vmo = zx::Vmo::create(*PAGE_SIZE as u64 * 2).expect("create VMO");
        // Fill the VMO with 0xff, so we can verify the BSS section is correctly allocated.
        let pattern = vec![0xff; *PAGE_SIZE * 2];
        vmo.write(&pattern, 0).expect("fill VMO with 0xff");

        // Remove the ZX_RIGHT_WRITE right. Since the BSS starts at a page boundary, we shouldn't
        // need to zero out any of the pages in this VMO.
        let vmo =
            vmo.replace_handle(VMO_DEFAULT_RIGHTS - zx::Rights::WRITE).expect("remove WRITE right");

        let mapper = TrackingMapper::new();
        map_elf_segments(&vmo, &headers, &mapper, 0, 0).expect("map ELF segments");

        let mut mapping_iter = mapper.into_iter();

        // Verify that a COW VMO was not created, since we didn't need to write to the original
        // VMO. We must check that KOIDs are the same, since we duplicate the handle when
        // recording it in TrackingMapper.
        let mapping = mapping_iter.next().expect("mapping from ELF VMO");
        assert_eq!(mapping.vmo.get_koid().unwrap(), vmo.get_koid().unwrap());

        let mut data = vec![0; *PAGE_SIZE];

        // Ensure the first page is from the ELF.
        mapping.vmo.read(&mut data, mapping.vmo_offset).expect("read ELF VMO");
        assert_eq!(&data, &pattern[0..*PAGE_SIZE]);

        let mapping = mapping_iter.next().expect("mapping from BSS VMO");

        // Ensure the second page is BSS.
        mapping.vmo.read(&mut data, mapping.vmo_offset).expect("read BSS VMO");
        let zero = vec![0; *PAGE_SIZE];
        assert_eq!(&data, &zero);

        // No more mappings expected.
        assert_matches!(mapping_iter.next(), None);
    }

    #[test]
    fn map_read_only_vmo_with_no_bss() {
        // Contains a PT_LOAD segment with no BSS (filesz == memsz).
        lazy_static! {
            static ref PAGE_SIZE: usize = zx::system_get_page_size() as usize;
            static ref ELF_PROGRAM_HEADER: elf_parse::Elf64ProgramHeader =
                elf_parse::Elf64ProgramHeader {
                    segment_type: elf_parse::SegmentType::Load as u32,
                    flags: elf_parse::SegmentFlags::from_bits_truncate(
                        elf_parse::SegmentFlags::READ.bits()
                            | elf_parse::SegmentFlags::EXECUTE.bits(),
                    )
                    .bits(),
                    offset: *PAGE_SIZE,
                    vaddr: 0x10000,
                    paddr: 0x10000,
                    filesz: *PAGE_SIZE as u64,
                    memsz: *PAGE_SIZE as u64,
                    align: *PAGE_SIZE as u64,
                };
        }
        let headers = elf_parse::Elf64Headers::new_for_test(
            ELF_FILE_HEADER,
            Some(std::slice::from_ref(&ELF_PROGRAM_HEADER)),
        );
        let vmo = zx::Vmo::create(*PAGE_SIZE as u64 * 2).expect("create VMO");
        // Fill the VMO with 0xff, so we can verify the mapped contents come from the file.
        let pattern = vec![0xff; *PAGE_SIZE * 2];
        vmo.write(&pattern, 0).expect("fill VMO with 0xff");

        // Remove the ZX_RIGHT_WRITE right. Since there is no BSS, we shouldn't need to zero out
        // any of the pages in this VMO.
        let vmo =
            vmo.replace_handle(VMO_DEFAULT_RIGHTS - zx::Rights::WRITE).expect("remove WRITE right");

        let mapper = TrackingMapper::new();
        map_elf_segments(&vmo, &headers, &mapper, 0, 0).expect("map ELF segments");

        let mut mapping_iter = mapper.into_iter();

        // Verify that a COW VMO was not created, since we didn't need to write to the original
        // VMO. We must check that KOIDs are the same, since we duplicate the handle when
        // recording it in TrackingMapper.
        let mapping = mapping_iter.next().expect("mapping from ELF VMO");
        assert_eq!(mapping.vmo.get_koid().unwrap(), vmo.get_koid().unwrap());

        let mut data = vec![0; *PAGE_SIZE];

        // Ensure the first page is from the ELF.
        mapping.vmo.read(&mut data, mapping.vmo_offset).expect("read ELF VMO");
        assert_eq!(&data, &pattern[0..*PAGE_SIZE]);

        // No more mappings expected.
        assert_matches!(mapping_iter.next(), None);
    }

    #[test]
    fn map_read_only_vmo_with_write_flag() {
        // Contains a PT_LOAD segment where there is no BSS.
        lazy_static! {
            static ref PAGE_SIZE: usize = zx::system_get_page_size() as usize;
            static ref ELF_PROGRAM_HEADER: elf_parse::Elf64ProgramHeader =
                elf_parse::Elf64ProgramHeader {
                    segment_type: elf_parse::SegmentType::Load as u32,
                    flags: elf_parse::SegmentFlags::from_bits_truncate(
                        elf_parse::SegmentFlags::READ.bits()
                            | elf_parse::SegmentFlags::WRITE.bits(),
                    )
                    .bits(),
                    offset: *PAGE_SIZE,
                    vaddr: 0x10000,
                    paddr: 0x10000,
                    filesz: *PAGE_SIZE as u64,
                    memsz: *PAGE_SIZE as u64,
                    align: *PAGE_SIZE as u64,
                };
        }
        let headers = elf_parse::Elf64Headers::new_for_test(
            ELF_FILE_HEADER,
            Some(std::slice::from_ref(&ELF_PROGRAM_HEADER)),
        );
        let vmo = zx::Vmo::create(*PAGE_SIZE as u64 * 2).expect("create VMO");

        // Remove the ZX_RIGHT_WRITE right. Since the segment has a WRITE flag, a COW child VMO
        // will be created.
        let vmo =
            vmo.replace_handle(VMO_DEFAULT_RIGHTS - zx::Rights::WRITE).expect("remove WRITE right");

        let mapper = TrackingMapper::new();
        map_elf_segments(&vmo, &headers, &mapper, 0, 0).expect("map ELF segments");

        let mut mapping_iter = mapper.into_iter();

        // Verify that a COW VMO was created, since the segment had a WRITE flag.
        // We must check that KOIDs are different, since we duplicate the handle when recording it
        // in TrackingMapper.
        let mapping = mapping_iter.next().expect("mapping from ELF VMO");
        assert_ne!(mapping.vmo.get_koid().unwrap(), vmo.get_koid().unwrap());

        // Attempt to write to the VMO to ensure it has the ZX_RIGHT_WRITE right.
        mapping.vmo.write(b"FUCHSIA!", mapping.vmo_offset).expect("write to COW VMO");

        // No more mappings expected.
        assert_matches!(mapping_iter.next(), None);
    }

    #[test]
    fn segment_with_zero_file_size() {
        // Contains a PT_LOAD segment whose filesz is 0.
        lazy_static! {
            static ref PAGE_SIZE: usize = zx::system_get_page_size() as usize;
            static ref ELF_PROGRAM_HEADER: elf_parse::Elf64ProgramHeader =
                elf_parse::Elf64ProgramHeader {
                    segment_type: elf_parse::SegmentType::Load as u32,
                    flags: elf_parse::SegmentFlags::from_bits_truncate(
                        elf_parse::SegmentFlags::READ.bits()
                            | elf_parse::SegmentFlags::WRITE.bits(),
                    )
                    .bits(),
                    offset: *PAGE_SIZE,
                    vaddr: 0x10000,
                    paddr: 0x10000,
                    filesz: 0,
                    memsz: 1,
                    align: *PAGE_SIZE as u64,
                };
        }
        let headers = elf_parse::Elf64Headers::new_for_test(
            ELF_FILE_HEADER,
            Some(std::slice::from_ref(&ELF_PROGRAM_HEADER)),
        );
        let vmo = zx::Vmo::create(*PAGE_SIZE as u64 * 2).expect("create VMO");

        let mapper = TrackingMapper::new();
        map_elf_segments(&vmo, &headers, &mapper, 0, 0).expect("map ELF segments");
        // No zero-length mappings should have been requested.
        for mapping in mapper.into_iter() {
            assert!(mapping.length != 0);
        }
    }

    #[test]
    fn map_execute_only_segment() {
        lazy_static! {
            static ref PAGE_SIZE: usize = zx::system_get_page_size() as usize;
            static ref ELF_PROGRAM_HEADER: elf_parse::Elf64ProgramHeader =
                elf_parse::Elf64ProgramHeader {
                    segment_type: elf_parse::SegmentType::Load as u32,
                    flags: elf_parse::SegmentFlags::from_bits_truncate(
                        elf_parse::SegmentFlags::EXECUTE.bits(),
                    )
                    .bits(),
                    offset: *PAGE_SIZE,
                    vaddr: 0x10000,
                    paddr: 0x10000,
                    filesz: 0x10,
                    memsz: 0x10,
                    align: *PAGE_SIZE as u64,
                };
        }
        let headers = elf_parse::Elf64Headers::new_for_test(
            ELF_FILE_HEADER,
            Some(std::slice::from_ref(&ELF_PROGRAM_HEADER)),
        );
        let vmo = zx::Vmo::create(*PAGE_SIZE as u64 * 2).expect("create VMO");

        let mapper = TrackingMapper::new();
        map_elf_segments(&vmo, &headers, &mapper, 0, 0).expect("map ELF segments");

        let mut mapping_iter = mapper.into_iter();
        let mapping = mapping_iter.next().expect("mapping from ELF VMO");
        assert_eq!(
            mapping.flags,
            zx::VmarFlags::SPECIFIC
                | zx::VmarFlags::ALLOW_FAULTS
                | zx::VmarFlags::PERM_EXECUTE
                | zx::VmarFlags::PERM_READ_IF_XOM_UNSUPPORTED
        );

        // No more mappings expected.
        assert_matches!(mapping_iter.next(), None);
    }
}