// Copyright 2023 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

use crate::{BlockContainer, CopyBytes, ReadBytes, WriteBytes};
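
/// A block container backed by a VMO mapping in the root VMAR. The mapping is released when the
/// `Container` is dropped.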
#[derive(Debug)]
pub struct Container {
    buffer_addr: usize,
    vmo_size: usize,
}

impl Container {
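    /// Creates a VMO of `size` bytes and maps it with read and write permissions, returning the
    /// container together with the VMO that backs it.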
    pub fn read_and_write(size: usize) -> Result<(Self, zx::Vmo), zx::Status> {
        let vmo = zx::Vmo::create(size as u64)?;
        let flags = zx::VmarFlags::PERM_READ
            | zx::VmarFlags::MAP_RANGE
            | zx::VmarFlags::REQUIRE_NON_RESIZABLE
            | zx::VmarFlags::PERM_WRITE;
        let buffer_addr = Self::map_vmo(&vmo, size, flags)?;
        Ok((Self { buffer_addr, vmo_size: size }, vmo))
    }
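
    /// Maps the given VMO with read-only permissions for the lifetime of the returned container.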
    pub fn read_only(vmo: &zx::Vmo) -> Result<Self, zx::Status> {
        let vmo_size = vmo.get_size()? as usize;
        let flags = zx::VmarFlags::PERM_READ | zx::VmarFlags::REQUIRE_NON_RESIZABLE;
        let buffer_addr = Self::map_vmo(vmo, vmo_size, flags)?;
        Ok(Self { buffer_addr, vmo_size })
    }
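
    /// Maps `vmo_size` bytes of the VMO into the root VMAR with the given flags and returns the
    /// base address of the mapping.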
    fn map_vmo(vmo: &zx::Vmo, vmo_size: usize, flags: zx::VmarFlags) -> Result<usize, zx::Status> {
        let buffer_addr = fuchsia_runtime::vmar_root_self().map(0, vmo, 0, vmo_size, flags)?;
        Ok(buffer_addr)
    }
}

impl Drop for Container {
    fn drop(&mut self) {
        // SAFETY: The memory behind this `Container` is only accessible via references which must
        // have been invalidated by this point, so it is safe to unmap the memory.
        unsafe {
            fuchsia_runtime::vmar_root_self()
                .unmap(self.buffer_addr, self.vmo_size)
                .expect("failed to unmap Container");
        }
    }
}

impl BlockContainer for Container {
    type Data = zx::Vmo;
    type ShareableData = zx::Vmo;

    #[inline]
    fn len(&self) -> usize {
        self.vmo_size
    }
}

impl ReadBytes for Container {
    /// Returns a slice of exactly `size` bytes starting at `offset`, or `None` if such a slice
    /// does not fit within the container. The offset is inclusive.
    #[inline]
    fn get_slice_at(&self, offset: usize, size: usize) -> Option<&[u8]> {
        if offset >= self.len() {
            return None;
        }
        let upper_bound = offset.checked_add(size)?;
        if upper_bound > self.len() {
            return None;
        }
        let ptr = (self.buffer_addr + offset) as *const u8;
        // SAFETY: the checks above guarantee we have a slice of bytes with `size` elements. Since
        // we have a shared reference to this container, we can get a shared reference to the
        // underlying mapped memory, whose lifetime is tied to our Container.
        unsafe { Some(std::slice::from_raw_parts(ptr, size)) }
    }
}

impl CopyBytes for Container {
    #[inline]
    fn copy_bytes_at(&self, offset: usize, dst: &mut [u8]) {
        if let Some(slice) = self.get_slice_at(offset, dst.len()) {
            dst.copy_from_slice(slice);
        }
    }
}

impl WriteBytes for Container {
    /// Returns a mutable slice of exactly `size` bytes starting at `offset`, or `None` if such a
    /// slice does not fit within the container. The offset is inclusive.
    #[inline]
    fn get_slice_mut_at(&mut self, offset: usize, size: usize) -> Option<&mut [u8]> {
        if offset >= self.len() {
            return None;
        }
        let upper_bound = offset.checked_add(size)?;
        if upper_bound > self.len() {
            return None;
        }
        let ptr = (self.buffer_addr + offset) as *mut u8;
        // SAFETY: the checks above guarantee we have a slice of bytes with `size` elements. Since
        // we have an exclusive reference to this container, we can get an exclusive reference to
        // the underlying mapped memory, whose lifetime is tied to our Container.
        unsafe { Some(std::slice::from_raw_parts_mut(ptr, size)) }
    }
}

impl BlockContainer for zx::Vmo {
    type Data = Self;
    type ShareableData = Self;

    #[inline]
    fn len(&self) -> usize {
        self.get_size().unwrap() as usize
    }
}

impl CopyBytes for zx::Vmo {
    #[inline]
    fn copy_bytes_at(&self, offset: usize, dst: &mut [u8]) {
        // Best-effort copy: errors from the VMO read are ignored.
        self.read(dst, offset as u64).ok();
    }
}
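
// The module below is an illustrative sketch (not part of the original source): it shows the
// intended round trip through `read_and_write`, `get_slice_mut_at`, and `get_slice_at`. The
// module and test names are hypothetical.
#[cfg(test)]
mod container_example {
    use super::*;
    use crate::{ReadBytes, WriteBytes};

    #[test]
    fn write_then_read_roundtrip() {
        // Create a mapped, writable container backed by a fresh 4 KiB VMO.
        let (mut container, _vmo) =
            Container::read_and_write(4096).expect("failed to create and map VMO");

        // Write through the exclusive slice...
        container
            .get_slice_mut_at(0, 4)
            .expect("offset and size are in bounds")
            .copy_from_slice(&[1, 2, 3, 4]);

        // ...and read the same bytes back through the shared slice.
        assert_eq!(container.get_slice_at(0, 4), Some(&[1u8, 2, 3, 4][..]));

        // Requests that do not fit entirely within the mapping return `None`.
        assert_eq!(container.get_slice_at(4096, 1), None);
    }
}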