1use crate::mm::{MemoryManager, PAGE_SIZE, VMEX_RESOURCE, ZX_VM_SPECIFIC_OVERWRITE};
6use fuchsia_runtime::UtcClock;
7use mapped_clock::{CLOCK_SIZE, MappedClock};
8use starnix_logging::{impossible_error, set_zx_name};
9use starnix_uapi::errno;
10use starnix_uapi::errors::Errno;
11use std::mem::MaybeUninit;
12use std::sync::Arc;
13use zerocopy::FromBytes;
14use zx::{AsHandleRef, HandleBased, Koid};
15
/// A mappable memory object managed by the memory manager.
///
/// Wraps either a plain Zircon VMO, a VMO used as a double-mapped ring
/// buffer, or a memory mapped UTC clock.
#[derive(Debug)]
pub enum MemoryObject {
    /// A regular Zircon VMO.
    Vmo(zx::Vmo),
    /// A VMO mapped twice (minus its header pages) so that the data region
    /// wraps around contiguously in the address space; see `map_in_vmar` and
    /// `op_range` for the offset translation.
    RingBuf(zx::Vmo),
    /// A UTC clock exposed to userspace as a read-only memory mapping.
    MemoryMappedClock {
        // Koid of `utc_clock`, captured at construction time so equality
        // checks do not require a syscall.
        koid: Koid,
        // The underlying clock handle that gets mapped via `MappedClock`.
        utc_clock: UtcClock,
    },
}
38
// `PartialEq` below compares kernel-object identity (VMO handle equality or
// clock koid), which is reflexive, so deriving a total equivalence via `Eq`
// is sound.
impl std::cmp::Eq for MemoryObject {}
40
41impl std::cmp::PartialEq for MemoryObject {
44 fn eq(&self, other: &MemoryObject) -> bool {
45 match (self, other) {
46 (MemoryObject::Vmo(vmo1), MemoryObject::Vmo(vmo2)) => vmo1 == vmo2,
47 (MemoryObject::RingBuf(vmo1), MemoryObject::RingBuf(vmo2)) => vmo1 == vmo2,
48 (MemoryObject::MemoryMappedClock { .. }, MemoryObject::MemoryMappedClock { .. }) => {
49 self.get_koid() == other.get_koid()
50 }
51 (_, _) => false,
52 }
53 }
54}
55
56impl From<zx::Vmo> for MemoryObject {
57 fn from(vmo: zx::Vmo) -> Self {
58 Self::Vmo(vmo)
59 }
60}
61
62impl From<UtcClock> for MemoryObject {
63 fn from(utc_clock: UtcClock) -> MemoryObject {
64 let koid = utc_clock.as_handle_ref().get_koid().expect("koid should always be readable");
65 MemoryObject::MemoryMappedClock { koid, utc_clock }
66 }
67}
68
69impl MemoryObject {
70 pub fn as_vmo(&self) -> Option<&zx::Vmo> {
71 match self {
72 Self::Vmo(vmo) => Some(&vmo),
73 Self::RingBuf(_) | Self::MemoryMappedClock { .. } => None,
74 }
75 }
76
77 pub fn is_clock(&self) -> bool {
79 match self {
80 Self::Vmo(_) | Self::RingBuf(_) => false,
81 Self::MemoryMappedClock { .. } => true,
82 }
83 }
84
85 pub fn into_vmo(self) -> Option<zx::Vmo> {
86 match self {
87 Self::Vmo(vmo) => Some(vmo),
88 Self::RingBuf(_) | Self::MemoryMappedClock { .. } => None,
89 }
90 }
91
92 pub fn get_content_size(&self) -> u64 {
93 match self {
94 Self::Vmo(vmo) => vmo.get_stream_size().map_err(impossible_error).unwrap(),
95 Self::RingBuf(_) => self.get_size(),
96 Self::MemoryMappedClock { .. } => CLOCK_SIZE as u64,
97 }
98 }
99
100 pub fn set_content_size(&self, size: u64) -> Result<(), zx::Status> {
101 match self {
102 Self::Vmo(vmo) => vmo.set_stream_size(size),
103 Self::RingBuf(_) => Ok(()),
104 Self::MemoryMappedClock { .. } => Err(zx::Status::NOT_SUPPORTED),
105 }
106 }
107
108 pub fn get_size(&self) -> u64 {
109 match self {
110 Self::Vmo(vmo) => vmo.get_size().map_err(impossible_error).unwrap(),
111 Self::RingBuf(vmo) => {
112 let base_size = vmo.get_size().map_err(impossible_error).unwrap();
113 (base_size - *PAGE_SIZE) * 2
114 }
115 Self::MemoryMappedClock { .. } => CLOCK_SIZE as u64,
116 }
117 }
118
119 pub fn set_size(&self, size: u64) -> Result<(), zx::Status> {
120 match self {
121 Self::Vmo(vmo) => vmo.set_size(size),
122 Self::RingBuf(_) | Self::MemoryMappedClock { .. } => Err(zx::Status::NOT_SUPPORTED),
123 }
124 }
125
126 pub fn create_child(
127 &self,
128 option: zx::VmoChildOptions,
129 offset: u64,
130 size: u64,
131 ) -> Result<Self, zx::Status> {
132 match self {
133 Self::Vmo(vmo) => vmo.create_child(option, offset, size).map(Self::from),
134 Self::RingBuf(vmo) => vmo.create_child(option, offset, size).map(Self::RingBuf),
135 Self::MemoryMappedClock { .. } => Err(zx::Status::NOT_SUPPORTED),
136 }
137 }
138
139 pub fn duplicate_handle(&self, rights: zx::Rights) -> Result<Self, zx::Status> {
140 match self {
141 Self::Vmo(vmo) => vmo.duplicate_handle(rights).map(Self::from),
142 Self::RingBuf(vmo) => vmo.duplicate_handle(rights).map(Self::RingBuf),
143 Self::MemoryMappedClock { utc_clock, .. } => {
144 utc_clock.duplicate_handle(rights).map(|c| Self::from(c))
145 }
146 }
147 }
148
149 pub fn read(&self, data: &mut [u8], offset: u64) -> Result<(), zx::Status> {
150 match self {
151 Self::Vmo(vmo) => vmo.read(data, offset),
152 Self::RingBuf(_) | Self::MemoryMappedClock { .. } => Err(zx::Status::NOT_SUPPORTED),
153 }
154 }
155
156 pub fn read_to_array<T: Copy + FromBytes, const N: usize>(
157 &self,
158 offset: u64,
159 ) -> Result<[T; N], zx::Status> {
160 match self {
161 Self::Vmo(vmo) => vmo.read_to_array(offset),
162 Self::RingBuf(_) => Err(zx::Status::NOT_SUPPORTED),
163 Self::MemoryMappedClock { .. } => Err(zx::Status::NOT_SUPPORTED),
165 }
166 }
167
168 pub fn read_to_vec(&self, offset: u64, length: u64) -> Result<Vec<u8>, zx::Status> {
169 match self {
170 Self::Vmo(vmo) => vmo.read_to_vec(offset, length),
171 Self::RingBuf(_) => Err(zx::Status::NOT_SUPPORTED),
172 Self::MemoryMappedClock { .. } => Err(zx::Status::NOT_SUPPORTED),
174 }
175 }
176
177 pub fn read_uninit<'a>(
178 &self,
179 data: &'a mut [MaybeUninit<u8>],
180 offset: u64,
181 ) -> Result<&'a mut [u8], zx::Status> {
182 match self {
183 Self::Vmo(vmo) => vmo.read_uninit(data, offset),
184 Self::RingBuf(_) => Err(zx::Status::NOT_SUPPORTED),
185 Self::MemoryMappedClock { .. } => Err(zx::Status::NOT_SUPPORTED),
187 }
188 }
189
190 pub unsafe fn read_raw(
200 &self,
201 buffer: *mut u8,
202 buffer_length: usize,
203 offset: u64,
204 ) -> Result<(), zx::Status> {
205 match self {
206 #[allow(clippy::undocumented_unsafe_blocks, reason = "2024 edition migration")]
207 Self::Vmo(vmo) => unsafe { vmo.read_raw(buffer, buffer_length, offset) },
208 Self::RingBuf(_) => Err(zx::Status::NOT_SUPPORTED),
209 Self::MemoryMappedClock { .. } => Err(zx::Status::NOT_SUPPORTED),
211 }
212 }
213
214 pub fn write(&self, data: &[u8], offset: u64) -> Result<(), zx::Status> {
220 match self {
221 Self::Vmo(vmo) => vmo.write(data, offset),
222 Self::RingBuf(_) | Self::MemoryMappedClock { .. } => Err(zx::Status::NOT_SUPPORTED),
223 }
224 }
225
226 pub fn basic_info(&self) -> zx::HandleBasicInfo {
228 match self {
229 Self::Vmo(vmo) | Self::RingBuf(vmo) => {
230 vmo.basic_info().map_err(impossible_error).unwrap()
231 }
232 Self::MemoryMappedClock { utc_clock, .. } => {
233 utc_clock.basic_info().map_err(impossible_error).unwrap()
234 }
235 }
236 }
237
238 pub fn get_koid(&self) -> Koid {
242 match self {
243 Self::Vmo(_) | Self::RingBuf(_) => self.basic_info().koid,
244 Self::MemoryMappedClock { koid, .. } => *koid,
245 }
246 }
247
248 pub fn info(&self) -> Result<zx::VmoInfo, Errno> {
255 match self {
256 Self::Vmo(vmo) | Self::RingBuf(vmo) => vmo.info().map_err(|_| errno!(EIO)),
257 Self::MemoryMappedClock { .. } => {
259 panic!("info() is not supported on a memory mapped clock")
260 }
261 }
262 }
263
264 pub fn set_zx_name(&self, name: &[u8]) {
265 match self {
266 Self::Vmo(vmo) | Self::RingBuf(vmo) => set_zx_name(vmo, name),
267 Self::MemoryMappedClock { .. } => {
268 }
271 }
272 }
273
274 pub fn with_zx_name(self, name: &[u8]) -> Self {
275 self.set_zx_name(name);
276 self
277 }
278
279 pub fn op_range(
280 &self,
281 op: zx::VmoOp,
282 mut offset: u64,
283 mut size: u64,
284 ) -> Result<(), zx::Status> {
285 match self {
286 Self::Vmo(vmo) => vmo.op_range(op, offset, size),
287 Self::RingBuf(vmo) => {
288 let vmo_size = vmo.get_size().map_err(impossible_error).unwrap();
289 let data_size = vmo_size - (2 * *PAGE_SIZE);
290 let memory_size = vmo_size + data_size;
291 if offset + size > memory_size {
292 return Err(zx::Status::OUT_OF_RANGE);
293 }
294 if offset >= vmo_size {
297 offset -= data_size;
298 }
299 if offset + size > vmo_size {
302 vmo.op_range(op, 2 * *PAGE_SIZE, offset + size - vmo_size)?;
303 size = vmo_size - offset;
304 }
305 vmo.op_range(op, offset, size)
306 }
307 Self::MemoryMappedClock { .. } => Err(zx::Status::NOT_SUPPORTED),
308 }
309 }
310
311 pub fn replace_as_executable(self, vmex: &zx::Resource) -> Result<Self, zx::Status> {
312 match self {
313 Self::Vmo(vmo) => vmo.replace_as_executable(vmex).map(Self::from),
314 Self::RingBuf(_) | Self::MemoryMappedClock { .. } => Err(zx::Status::NOT_SUPPORTED),
315 }
316 }
317
318 pub fn map_in_vmar(
319 &self,
320 vmar: &zx::Vmar,
321 vmar_offset: usize,
322 mut memory_offset: u64,
323 len: usize,
324 flags: zx::VmarFlags,
325 ) -> Result<usize, zx::Status> {
326 match self {
327 Self::Vmo(vmo) => vmar.map(vmar_offset, vmo, memory_offset, len, flags),
328 Self::RingBuf(vmo) => {
329 let vmo_size = vmo.get_size().map_err(impossible_error).unwrap();
330 let data_size = vmo_size - (2 * *PAGE_SIZE);
331 let memory_size = vmo_size + data_size;
332 if memory_offset.checked_add(len as u64).ok_or(zx::Status::OUT_OF_RANGE)?
333 > memory_size
334 {
335 return Err(zx::Status::OUT_OF_RANGE);
336 }
337 if memory_offset >= vmo_size {
340 memory_offset -= data_size;
341 }
342 let result = vmar.map(
346 vmar_offset,
347 vmo,
348 memory_offset,
349 len,
350 flags | zx::VmarFlags::ALLOW_FAULTS,
351 )?;
352 let max_mapped_len = (vmo_size - memory_offset) as usize;
355 if len > max_mapped_len {
358 let vmar_info = vmar.info().map_err(|_| errno!(EIO))?;
359 let base_address = vmar_info.base;
360 let second_mapping_address = vmar
362 .map(
363 result + max_mapped_len - base_address,
364 vmo,
365 2 * *PAGE_SIZE,
366 len - max_mapped_len,
367 flags | ZX_VM_SPECIFIC_OVERWRITE,
368 )
369 .expect("Mapping should not fail as the space has been reserved");
370 debug_assert_eq!(second_mapping_address, result + max_mapped_len);
371 }
372 Ok(result)
373 }
374 Self::MemoryMappedClock { utc_clock, .. } => {
375 assert_eq!(0, memory_offset, "memory mapped clock must be at memory offset 0");
378
379 let memory_mapped_clock = MappedClock::try_new_without_unmap(
381 &utc_clock,
382 vmar,
383 flags,
384 vmar_offset as u64,
385 )?;
386 Ok(memory_mapped_clock.raw_addr())
387 }
388 }
389 }
390
391 pub fn memmove(
392 &self,
393 options: zx::TransferDataOptions,
394 dst_offset: u64,
395 src_offset: u64,
396 size: u64,
397 ) -> Result<(), zx::Status> {
398 match self {
399 Self::Vmo(vmo) => vmo.transfer_data(options, dst_offset, size, vmo, src_offset),
400 Self::RingBuf(_) | Self::MemoryMappedClock { .. } => Err(zx::Status::NOT_SUPPORTED),
401 }
402 }
403
404 pub fn clone_memory(self: &Arc<Self>, rights: zx::Rights) -> Result<Arc<Self>, Errno> {
405 if self.is_clock() {
406 return Err(errno!(ENOTSUP, "clone_memory not supported on memory mapped clock"));
407 }
408
409 let memory_info = self.info()?;
411 let pager_backed = memory_info.flags.contains(zx::VmoInfoFlags::PAGER_BACKED);
412 Ok(if pager_backed && !rights.contains(zx::Rights::WRITE) {
413 self.clone()
414 } else {
415 let mut cloned_memory = self
416 .create_child(
417 zx::VmoChildOptions::SNAPSHOT_MODIFIED | zx::VmoChildOptions::RESIZABLE,
418 0,
419 memory_info.size_bytes,
420 )
421 .map_err(MemoryManager::get_errno_for_map_err)?;
422 if rights.contains(zx::Rights::EXECUTE) {
423 cloned_memory = cloned_memory
424 .replace_as_executable(&VMEX_RESOURCE)
425 .map_err(impossible_error)?;
426 }
427
428 Arc::new(cloned_memory)
429 })
430 }
431}