1use std::mem::MaybeUninit;
8use std::ptr::NonNull;
9use std::sync::Arc;
10
// Name stamped on every freshly-created backing VMO; shows up in mapping
// diagnostics (see the `basic` test below).
const DEFAULT_VMO_NAME: zx::Name = zx::Name::from_bytes_lossy(b"starnix_page_buf");

/// A page-granular buffer of `T`s backed by a VMO mapped read/write into the
/// root VMAR, and optionally also mapped read-only into one extra VMAR.
#[derive(Debug)]
pub struct PageBuf<T> {
    // Backing memory object; kept alive for the lifetime of the mappings.
    vmo: zx::Vmo,
    // Start of the root-VMAR mapping of `vmo`.
    base: NonNull<MaybeUninit<u8>>,
    // Size of the mapping in bytes, rounded up to a whole number of pages.
    mapped_len: usize,
    // If present, `vmo` is also mapped read-only into this VMAR at this base
    // address; both mappings are removed on drop.
    extra_vmar_and_base: Option<(Arc<zx::Vmar>, usize)>,
    // Records the element type without storing any `T`s.
    _ty: std::marker::PhantomData<T>,
}
25
26impl<T> PageBuf<T> {
27 pub fn new(capacity: usize) -> Result<Self, zx::Status> {
29 Self::new_internal(capacity, None)
30 }
31
32 pub fn new_with_extra_vmar(
35 capacity: usize,
36 extra_vmar: Arc<zx::Vmar>,
37 ) -> Result<Self, zx::Status> {
38 Self::new_internal(capacity, Some(extra_vmar))
39 }
40
41 fn new_internal(
42 capacity: usize,
43 extra_vmar: Option<Arc<zx::Vmar>>,
44 ) -> Result<Self, zx::Status> {
45 let capacity_bytes = capacity * std::mem::size_of::<T>();
46 let vmo = zx::Vmo::create(capacity_bytes as u64)?;
47
48 let mapped_len = capacity_bytes.next_multiple_of(zx::system_get_page_size() as usize);
49 let addr = fuchsia_runtime::vmar_root_self().map(
50 0,
51 &vmo,
52 0,
53 mapped_len,
54 zx::VmarFlags::PERM_READ | zx::VmarFlags::PERM_WRITE | zx::VmarFlags::ALLOW_FAULTS,
55 )?;
56 let base =
57 NonNull::new(std::ptr::with_exposed_provenance_mut::<MaybeUninit<u8>>(addr)).unwrap();
58
59 let extra_vmar_and_base = if let Some(extra_vmar) = extra_vmar {
60 let extra_base = extra_vmar.map(0, &vmo, 0, mapped_len, zx::VmarFlags::PERM_READ)?;
61 Some((extra_vmar, extra_base))
62 } else {
63 None
64 };
65
66 let this =
67 Self { vmo, base, mapped_len, extra_vmar_and_base, _ty: std::marker::PhantomData };
68 this.set_name(&DEFAULT_VMO_NAME);
69 Ok(this)
70 }
71
72 pub fn set_name(&self, name: &zx::Name) {
74 self.vmo.set_name(name).expect("default vmo rights must include ability to set name");
75 }
76
77 pub fn len(&self) -> usize {
78 self.len_bytes() / std::mem::size_of::<T>()
79 }
80
81 pub fn len_bytes(&self) -> usize {
82 self.mapped_len
83 }
84
85 pub fn as_mut(&mut self) -> &mut [MaybeUninit<T>] {
87 assert!(
88 std::mem::align_of::<T>() <= zx::system_get_page_size() as usize,
89 "can't handle types with alignment greater than a page yet"
90 );
91
92 let bytes = unsafe { std::slice::from_raw_parts_mut(self.base.as_ptr(), self.mapped_len) };
96 let num_elems = bytes.len() / std::mem::size_of::<T>();
97 let bytes_as_t = bytes.as_mut_ptr().cast::<MaybeUninit<T>>();
98
99 unsafe { std::slice::from_raw_parts_mut(bytes_as_t, num_elems) }
102 }
103}
104
// SAFETY: `PageBuf` exclusively owns its mapping; `base` is just a cached
// address into memory kept alive by the owned `vmo` handle, so moving the
// buffer between threads is sound whenever `T` itself is `Send`.
unsafe impl<T: Send> Send for PageBuf<T> {}
// SAFETY: all `&self` methods (`set_name`, `len`, `len_bytes`) only touch the
// VMO handle and plain metadata, never the buffer contents; mutation requires
// `&mut self`, so shared access is sound whenever `T: Sync`.
unsafe impl<T: Sync> Sync for PageBuf<T> {}
109
impl<T> Drop for PageBuf<T> {
    // Removes both mappings created by `new_internal`; the `vmo` handle itself
    // is closed by its own drop afterward.
    fn drop(&mut self) {
        // SAFETY: `base..base + mapped_len` was mapped by `new_internal` and
        // is unmapped exactly once, here; no borrows of the buffer can outlive
        // `&mut self`, so nothing observes the memory after this point.
        unsafe {
            fuchsia_runtime::vmar_root_self()
                .unmap(self.base.addr().into(), self.mapped_len)
                .unwrap();
        }

        // Also tear down the read-only alias in the extra VMAR, if one exists.
        if let Some((extra_vmar, extra_base)) = &self.extra_vmar_and_base {
            // SAFETY: this range was mapped into `extra_vmar` by
            // `new_internal` and nothing else unmaps it.
            unsafe {
                extra_vmar.unmap(*extra_base, self.mapped_len).unwrap();
            }
        }
    }
}
127
#[cfg(test)]
mod tests {
    use super::*;
    use fuchsia_runtime::vmar_root_self;
    use zx::HandleBased;

    // Writes a known pattern into every slot, proving the mapping is writable
    // end to end.
    #[track_caller]
    fn fill_buf(buf: &mut PageBuf<[u8; 16]>) {
        for slot in buf.as_mut() {
            slot.write([1u8; 16]);
        }
    }

    // Returns the mappings from `infos` that overlap `addr_range`, sorted by
    // base address.
    // NOTE(review): a mapping that strictly contains `addr_range` (starts
    // before it and ends after it) matches neither `contains` check — confirm
    // that case cannot occur for the ranges queried by these tests.
    #[track_caller]
    fn find_zx_mappings(
        infos: &[zx::MapInfo],
        addr_range: std::ops::Range<usize>,
    ) -> Vec<(zx::Name, std::ops::Range<usize>, zx::MappingDetails)> {
        let mut found = vec![];
        for info in infos {
            let info_range = info.base..info.base + info.size;
            if addr_range.contains(&info_range.start) || addr_range.contains(&info_range.end) {
                if let Some(mapping) = info.details().as_mapping() {
                    found.push((info.name, info_range, mapping.clone()));
                }
            }
        }

        found.sort_by_key(|m| m.1.start);
        found
    }

    // The buffer's mapping exists with the default VMO name and backing koid,
    // and disappears once the buffer is dropped.
    #[fuchsia::test]
    fn basic() {
        let mut buf = PageBuf::<[u8; 16]>::new(100).unwrap();
        assert!(buf.len() >= 100);
        fill_buf(&mut buf);

        let buf_base = buf.base.addr().get();
        let buf_len = buf.mapped_len;
        let maps = vmar_root_self().maps_vec().unwrap();
        let desired_range = buf_base..buf_base + buf_len;
        let (name, range, details) = &find_zx_mappings(&maps, desired_range.clone())[0];

        assert_eq!(name, &DEFAULT_VMO_NAME);
        assert_eq!(range, &desired_range);
        assert_eq!(details.vmo_koid, buf.vmo.koid().unwrap());

        drop(buf);
        let maps = vmar_root_self().maps_vec().unwrap();
        assert_eq!(find_zx_mappings(&maps, desired_range), vec![]);
    }

    // `set_name` on the buffer is reflected in the mapping's reported name.
    #[fuchsia::test]
    fn setting_name_works() {
        let buf = PageBuf::<[u8; 16]>::new(100).unwrap();
        let vmo_name = zx::Name::from_bytes_lossy(b"setting_name_works");
        buf.set_name(&vmo_name);

        let buf_base = buf.base.addr().get();
        let buf_len = buf.mapped_len;
        let maps = vmar_root_self().maps_vec().unwrap();
        let desired_range = buf_base..buf_base + buf_len;
        let (name, _range, _details) = &find_zx_mappings(&maps, desired_range.clone())[0];

        assert_eq!(name, &vmo_name);
    }

    // The uninitialized slice can be handed directly to a syscall that fills
    // it (here: the root VMAR's mapping info query).
    #[fuchsia::test]
    fn can_be_used_for_object_info() {
        // Query once with an empty buffer just to learn how many entries exist,
        // then allocate twice that to leave headroom.
        let (_, _, avail) = vmar_root_self().maps(&mut []).unwrap();
        let mut buf = PageBuf::<zx::MapInfo>::new(avail * 2).unwrap();

        let (maps, _, avail) = vmar_root_self().maps(buf.as_mut()).unwrap();
        assert_eq!(maps.len(), avail, "should have consumed all of the mappings");
    }

    // The uninitialized slice can be the destination of a VMO read.
    #[fuchsia::test]
    fn can_be_used_for_vmo_reads() {
        let len = 8 * zx::system_get_page_size() as usize;
        let mut buf = PageBuf::<u8>::new(len).unwrap();

        let source_contents = vec![42u8; len];
        let source_vmo = zx::Vmo::create(len as u64).unwrap();
        source_vmo.write(&source_contents, 0).unwrap();

        let in_mapping = source_vmo.read_uninit(buf.as_mut(), 0).unwrap();
        assert_eq!(in_mapping, source_contents);
    }

    // The backing VMO is created non-resizable, so growing it after creation
    // must fail.
    #[fuchsia::test]
    fn is_fixed_size() {
        let buf = PageBuf::<[u8; 16]>::new(100).unwrap();
        assert!(buf.len() >= 100);
        let vmo_size = buf.vmo.get_size().unwrap();
        assert!(vmo_size >= 100 * 16);
        assert_eq!(buf.vmo.set_size(vmo_size * 2), Err(zx::Status::UNAVAILABLE));
    }

    // With an extra VMAR supplied, a second read-only mapping of the same VMO
    // appears in it, and is torn down together with the buffer.
    #[fuchsia::test]
    fn extra_vmar_is_mapped() {
        // Use a duplicate of the root VMAR as the "extra" VMAR so the mapping
        // is observable via `maps_vec` on the root.
        let extra_vmar = Arc::new(
            fuchsia_runtime::vmar_root_self().duplicate_handle(zx::Rights::SAME_RIGHTS).unwrap(),
        );
        let buf = PageBuf::<[u8; 16]>::new_with_extra_vmar(100, extra_vmar).unwrap();
        let extra_base = buf.extra_vmar_and_base.as_ref().unwrap().1;
        let expected_range = extra_base..extra_base + buf.mapped_len;
        let maps = fuchsia_runtime::vmar_root_self().maps_vec().unwrap();

        let (_name, observed_range, details) = &find_zx_mappings(&maps, expected_range.clone())[0];
        assert!(observed_range.contains(&expected_range.start));
        assert!(
            observed_range.contains(&expected_range.end)
                || observed_range.end == expected_range.end
        );
        assert_eq!(details.vmo_koid, buf.vmo.koid().unwrap());
        assert_eq!(details.vmo_offset, 0);
        assert_eq!(
            details.mmu_flags,
            zx::VmarFlagsExtended::PERM_READ,
            "extra mapping must not be writeable",
        );

        drop(buf);
        let maps = fuchsia_runtime::vmar_root_self().maps_vec().unwrap();
        assert_eq!(
            find_zx_mappings(&maps, expected_range),
            vec![],
            "extra mapping must be dropped too"
        );
    }
}