Skip to main content

starnix_core/mm/
memory.rs

1// Copyright 2021 The Fuchsia Authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
5use crate::mm::{
6    MappingOptions, MemoryManager, PAGE_SIZE, VMEX_RESOURCE, ZX_VM_SPECIFIC_OVERWRITE,
7};
8use fuchsia_runtime::UtcClock;
9use mapped_clock::{CLOCK_SIZE, MappedClock};
10use starnix_logging::{CATEGORY_STARNIX_MM, impossible_error, set_zx_name, trace_duration};
11use starnix_uapi::errno;
12use starnix_uapi::errors::Errno;
13use std::mem::MaybeUninit;
14use std::sync::{Arc, OnceLock};
15use zerocopy::FromBytes;
16use zx::{HandleBased, Koid};
17
// This tracks a VMO handle along with basic information about the handle.
#[derive(Debug)]
pub struct VmoAndBasicInfo {
    // The wrapped VMO handle.
    vmo: zx::Vmo,
    // Lazily populated cache of the handle's (koid, rights), filled on the
    // first call to `get_info` from `basic_info()`.
    info: OnceLock<(Koid, zx::Rights)>,
}
24
25impl PartialEq for VmoAndBasicInfo {
26    fn eq(&self, other: &Self) -> bool {
27        self.vmo == other.vmo
28    }
29}
30
31impl Eq for VmoAndBasicInfo {}
32
33impl From<zx::Vmo> for VmoAndBasicInfo {
34    fn from(vmo: zx::Vmo) -> Self {
35        Self { vmo, info: OnceLock::new() }
36    }
37}
38
39impl VmoAndBasicInfo {
40    fn get_info(&self) -> &(Koid, zx::Rights) {
41        self.info.get_or_init(|| {
42            let info = self.vmo.basic_info().map_err(impossible_error).unwrap();
43            (info.koid, info.rights)
44        })
45    }
46
47    pub fn get_koid(&self) -> Koid {
48        self.get_info().0
49    }
50
51    pub fn get_rights(&self) -> zx::Rights {
52        self.get_info().1
53    }
54}
55
impl Drop for VmoAndBasicInfo {
    // Debug-build-only sanity check: if the koid/rights cache was populated,
    // verify at drop time that it still matches the handle's live info. A
    // mismatch would mean the cached values went stale while the object was
    // alive. `basic_info` errors are ignored here (best-effort check).
    fn drop(&mut self) {
        #[cfg(debug_assertions)]
        {
            if let Some((koid, rights)) = self.info.get() {
                if let Ok(info) = self.vmo.basic_info() {
                    debug_assert_eq!(*koid, info.koid, "Cached KOID mismatch");
                    debug_assert_eq!(*rights, info.rights, "Cached rights mismatch");
                }
            }
        }
    }
}
69
/// The kinds of memory-like objects that can back a mapping.
#[derive(Debug)]
pub enum MemoryObject {
    /// A plain VMO, with cached basic info.
    Vmo(VmoAndBasicInfo),
    /// The memory object is a bpf ring buffer. The layout it represents is:
    /// |Page1 - Page2 - Page3 .. PageN - Page3 .. PageN| where the vmo is
    /// |Page1 - Page2 - Page3 .. PageN|
    RingBuf(VmoAndBasicInfo),
    /// A memory mapped clock is backed by kernel memory, not by a VMO. So
    /// it is handled specially.  The lifecycle of this clock is:
    /// - starts off as an empty unmapped thing.
    /// - a MappedClock is created on `map_in_vmar`.
    /// - `set_zx_name` is a no-op for the clock (it is a singleton).
    /// - most clone/resize operations return errors.
    /// - unmapped at the end of the process lifetime.
    MemoryMappedClock {
        // Koid of the `utc_clock`, cached for performance.
        koid: Koid,
        // The UTC clock handle to map to memory. Do not use it for clock reads, use
        // the public functions in `//src/starnix/kernel/core/time/utc.rs` instead
        utc_clock: UtcClock,
    },
}
92
93impl std::cmp::Eq for MemoryObject {}
94
95// Implemented manually as `MemoryMappedClock`'s mutex is not transparent to
96// `PartialEq`.
97impl std::cmp::PartialEq for MemoryObject {
98    fn eq(&self, other: &MemoryObject) -> bool {
99        match (self, other) {
100            (MemoryObject::Vmo(info1), MemoryObject::Vmo(info2)) => info1.vmo == info2.vmo,
101            (MemoryObject::RingBuf(info1), MemoryObject::RingBuf(info2)) => info1.vmo == info2.vmo,
102            (MemoryObject::MemoryMappedClock { .. }, MemoryObject::MemoryMappedClock { .. }) => {
103                self.get_koid() == other.get_koid()
104            }
105            (_, _) => false,
106        }
107    }
108}
109
110impl From<zx::Vmo> for MemoryObject {
111    fn from(vmo: zx::Vmo) -> Self {
112        Self::Vmo(VmoAndBasicInfo::from(vmo))
113    }
114}
115
116impl From<UtcClock> for MemoryObject {
117    fn from(utc_clock: UtcClock) -> MemoryObject {
118        let koid = utc_clock.koid().expect("koid should always be readable");
119        MemoryObject::MemoryMappedClock { koid, utc_clock }
120    }
121}
122
123impl MemoryObject {
124    pub fn as_vmo(&self) -> Option<&zx::Vmo> {
125        match self {
126            Self::Vmo(info) => Some(&info.vmo),
127            Self::RingBuf(_) | Self::MemoryMappedClock { .. } => None,
128        }
129    }
130
131    /// Returns true if this [MemoryObject] is a memory mapped clock.
132    pub fn is_clock(&self) -> bool {
133        match self {
134            Self::Vmo(_) | Self::RingBuf(_) => false,
135            Self::MemoryMappedClock { .. } => true,
136        }
137    }
138
139    pub fn into_vmo(self) -> Option<zx::Vmo> {
140        match self {
141            Self::Vmo(info) => Some(
142                info.vmo
143                    .duplicate_handle(zx::Rights::SAME_RIGHTS)
144                    .expect("duplicate_handle failed in into_vmo"),
145            ),
146            Self::RingBuf(_) | Self::MemoryMappedClock { .. } => None,
147        }
148    }
149
150    pub fn get_content_size(&self) -> u64 {
151        match self {
152            Self::Vmo(info) => info.vmo.get_stream_size().map_err(impossible_error).unwrap(),
153            Self::RingBuf(_) => self.get_size(),
154            Self::MemoryMappedClock { .. } => CLOCK_SIZE as u64,
155        }
156    }
157
158    pub fn set_content_size(&self, size: u64) -> Result<(), zx::Status> {
159        match self {
160            Self::Vmo(info) => info.vmo.set_stream_size(size),
161            Self::RingBuf(_) => Ok(()),
162            Self::MemoryMappedClock { .. } => Err(zx::Status::NOT_SUPPORTED),
163        }
164    }
165
166    pub fn get_size(&self) -> u64 {
167        match self {
168            Self::Vmo(info) => info.vmo.get_size().map_err(impossible_error).unwrap(),
169            Self::RingBuf(info) => {
170                let base_size = info.vmo.get_size().map_err(impossible_error).unwrap();
171                (base_size - *PAGE_SIZE) * 2
172            }
173            Self::MemoryMappedClock { .. } => CLOCK_SIZE as u64,
174        }
175    }
176
177    pub fn set_size(&self, size: u64) -> Result<(), zx::Status> {
178        match self {
179            Self::Vmo(info) => info.vmo.set_size(size),
180            Self::RingBuf(_) | Self::MemoryMappedClock { .. } => Err(zx::Status::NOT_SUPPORTED),
181        }
182    }
183
184    pub fn create_child(
185        &self,
186        option: zx::VmoChildOptions,
187        offset: u64,
188        size: u64,
189    ) -> Result<Self, zx::Status> {
190        match self {
191            Self::Vmo(info) => info.vmo.create_child(option, offset, size).map(Self::from),
192            Self::RingBuf(info) => info
193                .vmo
194                .create_child(option, offset, size)
195                .map(|vmo| Self::RingBuf(VmoAndBasicInfo::from(vmo))),
196            Self::MemoryMappedClock { .. } => Err(zx::Status::NOT_SUPPORTED),
197        }
198    }
199
200    pub fn duplicate_handle(&self, rights: zx::Rights) -> Result<Self, zx::Status> {
201        match self {
202            Self::Vmo(info) => info.vmo.duplicate_handle(rights).map(Self::from),
203            Self::RingBuf(info) => info
204                .vmo
205                .duplicate_handle(rights)
206                .map(|vmo| Self::RingBuf(VmoAndBasicInfo::from(vmo))),
207            Self::MemoryMappedClock { utc_clock, .. } => {
208                utc_clock.duplicate_handle(rights).map(|c| Self::from(c))
209            }
210        }
211    }
212
213    pub fn read(&self, data: &mut [u8], offset: u64) -> Result<(), zx::Status> {
214        match self {
215            Self::Vmo(info) => info.vmo.read(data, offset),
216            Self::RingBuf(_) | Self::MemoryMappedClock { .. } => Err(zx::Status::NOT_SUPPORTED),
217        }
218    }
219
220    pub fn read_to_array<T: Copy + FromBytes, const N: usize>(
221        &self,
222        offset: u64,
223    ) -> Result<[T; N], zx::Status> {
224        match self {
225            Self::Vmo(info) => info.vmo.read_to_array(offset),
226            Self::RingBuf(_) => Err(zx::Status::NOT_SUPPORTED),
227            // There does not seem to be an API that allows this read.
228            Self::MemoryMappedClock { .. } => Err(zx::Status::NOT_SUPPORTED),
229        }
230    }
231
232    pub fn read_to_vec(&self, offset: u64, length: u64) -> Result<Vec<u8>, zx::Status> {
233        match self {
234            Self::Vmo(info) => info.vmo.read_to_vec(offset, length),
235            Self::RingBuf(_) => Err(zx::Status::NOT_SUPPORTED),
236            // See the note in `read_to_array` above.
237            Self::MemoryMappedClock { .. } => Err(zx::Status::NOT_SUPPORTED),
238        }
239    }
240
241    pub fn read_uninit<'a>(
242        &self,
243        data: &'a mut [MaybeUninit<u8>],
244        offset: u64,
245    ) -> Result<&'a mut [u8], zx::Status> {
246        match self {
247            Self::Vmo(info) => info.vmo.read_uninit(data, offset),
248            Self::RingBuf(_) => Err(zx::Status::NOT_SUPPORTED),
249            // See the note in `read_to_array` above.
250            Self::MemoryMappedClock { .. } => Err(zx::Status::NOT_SUPPORTED),
251        }
252    }
253
    /// Reads from the memory.
    ///
    /// # Safety
    ///
    /// Callers must guarantee that `buffer` is valid to write to for at
    /// least `buffer_length` bytes.
    ///
    /// # Errors
    ///
    /// Returns `zx::Status::NOT_SUPPORTED` where unsupported (ring buffers
    /// and memory mapped clocks).
    pub unsafe fn read_raw(
        &self,
        buffer: *mut u8,
        buffer_length: usize,
        offset: u64,
    ) -> Result<(), zx::Status> {
        match self {
            // SAFETY: forwarded to the caller's contract above — `buffer`
            // must be writable for `buffer_length` bytes.
            #[allow(clippy::undocumented_unsafe_blocks, reason = "2024 edition migration")]
            Self::Vmo(info) => unsafe { info.vmo.read_raw(buffer, buffer_length, offset) },
            Self::RingBuf(_) => Err(zx::Status::NOT_SUPPORTED),
            // See the note in `read_to_array` above.
            Self::MemoryMappedClock { .. } => Err(zx::Status::NOT_SUPPORTED),
        }
    }
277
278    /// Write to memory.
279    ///
280    /// # Errors
281    ///
282    /// Returns `zx::Status::NOT_SUPPORTED` for read-only memory.
283    pub fn write(&self, data: &[u8], offset: u64) -> Result<(), zx::Status> {
284        match self {
285            Self::Vmo(info) => info.vmo.write(data, offset),
286            Self::RingBuf(_) | Self::MemoryMappedClock { .. } => Err(zx::Status::NOT_SUPPORTED),
287        }
288    }
289
290    /// Returns the koid of the underlying memory-like object.
291    ///
292    /// Should be cheap to call frequently.
293    pub fn get_koid(&self) -> Koid {
294        match self {
295            Self::Vmo(info) => info.get_koid(),
296            Self::RingBuf(info) => info.get_koid(),
297            Self::MemoryMappedClock { koid, .. } => *koid,
298        }
299    }
300
301    /// Returns the rights of the underlying memory-like object.
302    pub fn get_rights(&self) -> zx::Rights {
303        match self {
304            Self::Vmo(info) => info.get_rights(),
305            Self::RingBuf(info) => info.get_rights(),
306            Self::MemoryMappedClock { utc_clock, .. } => {
307                utc_clock.basic_info().map_err(impossible_error).unwrap().rights
308            }
309        }
310    }
311
312    /// Returns `zx::VmoInfo` for a memory object that supports it.
313    ///
314    /// # Panics
315    ///
316    /// Calling `info` on a `MemoryObject` that is not represented by a VMO
317    /// will panic. To avoid this in code, call `is_clock` before attempting.
318    pub fn info(&self) -> Result<zx::VmoInfo, Errno> {
319        match self {
320            Self::Vmo(info) | Self::RingBuf(info) => info.vmo.info().map_err(|_| errno!(EIO)),
321            // Use `is_clock` to avoid calling info on a clock.
322            Self::MemoryMappedClock { .. } => {
323                panic!("info() is not supported on a memory mapped clock")
324            }
325        }
326    }
327
328    pub fn set_zx_name(&self, name: &[u8]) {
329        match self {
330            Self::Vmo(info) | Self::RingBuf(info) => set_zx_name(&info.vmo, name),
331            Self::MemoryMappedClock { .. } => {
332                // The memory mapped clock is a singleton, so it does not
333                // seem appropriate to give it a zx name.
334            }
335        }
336    }
337
    /// Builder-style variant of `set_zx_name`: names the underlying object
    /// (a no-op for the clock) and returns `self` for chaining.
    pub fn with_zx_name(self, name: &[u8]) -> Self {
        self.set_zx_name(name);
        self
    }
342
343    pub fn op_range(
344        &self,
345        op: zx::VmoOp,
346        mut offset: u64,
347        mut size: u64,
348    ) -> Result<(), zx::Status> {
349        match self {
350            Self::Vmo(info) => info.vmo.op_range(op, offset, size),
351            Self::RingBuf(info) => {
352                let vmo_size = info.vmo.get_size().map_err(impossible_error).unwrap();
353                let data_size = vmo_size - (2 * *PAGE_SIZE);
354                let memory_size = vmo_size + data_size;
355                if offset + size > memory_size {
356                    return Err(zx::Status::OUT_OF_RANGE);
357                }
358                // If `offset` is greater than `vmo_size`, the operation is equivalent to the one
359                // done on the first part of the memory range.
360                if offset >= vmo_size {
361                    offset -= data_size;
362                }
363                // If the operation spill over the end if the vmo, it must be done on the start of
364                // the data part of the vmo.
365                if offset + size > vmo_size {
366                    info.vmo.op_range(op, 2 * *PAGE_SIZE, offset + size - vmo_size)?;
367                    size = vmo_size - offset;
368                }
369                info.vmo.op_range(op, offset, size)
370            }
371            Self::MemoryMappedClock { .. } => Err(zx::Status::NOT_SUPPORTED),
372        }
373    }
374
375    pub fn replace_as_executable(self, vmex: &zx::Resource) -> Result<Self, zx::Status> {
376        match self {
377            Self::Vmo(info) => {
378                let vmo = info.vmo.duplicate_handle(zx::Rights::SAME_RIGHTS)?;
379                let exec_vmo = vmo.replace_as_executable(vmex)?;
380                Ok(Self::from(exec_vmo))
381            }
382            Self::RingBuf(_) | Self::MemoryMappedClock { .. } => Err(zx::Status::NOT_SUPPORTED),
383        }
384    }
385
386    pub fn map_in_vmar(
387        &self,
388        vmar: &zx::Vmar,
389        vmar_offset: usize,
390        mut memory_offset: u64,
391        len: usize,
392        flags: zx::VmarFlags,
393    ) -> Result<usize, zx::Status> {
394        match self {
395            Self::Vmo(info) => vmar.map(vmar_offset, &info.vmo, memory_offset, len, flags),
396            Self::RingBuf(info) => {
397                let vmo_size = info.vmo.get_size().map_err(impossible_error).unwrap();
398                let data_size = vmo_size - (2 * *PAGE_SIZE);
399                let memory_size = vmo_size + data_size;
400                if memory_offset.checked_add(len as u64).ok_or(zx::Status::OUT_OF_RANGE)?
401                    > memory_size
402                {
403                    return Err(zx::Status::OUT_OF_RANGE);
404                }
405                // If `memory_offset` is greater than `vmo_size`, the operation is equivalent to
406                // the one done on the first part of the memory range.
407                if memory_offset >= vmo_size {
408                    memory_offset -= data_size;
409                }
410                // Map the vmo for the full length. This ensures the kernel will choose a range
411                // that can accommodate the full length so that the second mapping will not erase
412                // another mapping.
413                let result = vmar.map(
414                    vmar_offset,
415                    &info.vmo,
416                    memory_offset,
417                    len,
418                    flags | zx::VmarFlags::ALLOW_FAULTS,
419                )?;
420                // The maximal amount of data that can have been mapped from the vmo with the
421                // previous operation.
422                let max_mapped_len = (vmo_size - memory_offset) as usize;
423                // If more data is needed, the data part of the vmo must be mapped again, replacing
424                // the part of the previous mapping that contained no data.
425                if len > max_mapped_len {
426                    let vmar_info = vmar.info().map_err(|_| errno!(EIO))?;
427                    let base_address = vmar_info.base;
428                    // The request should map the data part of the vmo a second time
429                    let second_mapping_address = vmar
430                        .map(
431                            result + max_mapped_len - base_address,
432                            &info.vmo,
433                            2 * *PAGE_SIZE,
434                            len - max_mapped_len,
435                            flags | ZX_VM_SPECIFIC_OVERWRITE,
436                        )
437                        .expect("Mapping should not fail as the space has been reserved");
438                    debug_assert_eq!(second_mapping_address, result + max_mapped_len);
439                }
440                Ok(result)
441            }
442            Self::MemoryMappedClock { utc_clock, .. } => {
443                // The memory mapped clock API only allows memory offset of 0, and a page-sized
444                // length of the mapping. No offset or partial mappings are allowed.
445                assert_eq!(0, memory_offset, "memory mapped clock must be at memory offset 0");
446
447                // We don't need to remember this, since vmar will know how to unmap it.
448                let memory_mapped_clock = MappedClock::try_new_without_unmap(
449                    &utc_clock,
450                    vmar,
451                    flags,
452                    vmar_offset as u64,
453                )?;
454                Ok(memory_mapped_clock.raw_addr())
455            }
456        }
457    }
458
459    pub fn memmove(
460        &self,
461        options: zx::TransferDataOptions,
462        dst_offset: u64,
463        src_offset: u64,
464        size: u64,
465    ) -> Result<(), zx::Status> {
466        match self {
467            Self::Vmo(info) => {
468                info.vmo.transfer_data(options, dst_offset, size, &info.vmo, src_offset)
469            }
470            Self::RingBuf(_) | Self::MemoryMappedClock { .. } => Err(zx::Status::NOT_SUPPORTED),
471        }
472    }
473
    /// Returns a memory object backing a clone of `self` with the given
    /// `rights` and mapping `options`.
    ///
    /// Non-anonymous read-only memory shares the same underlying object
    /// (Arc refcount bump); otherwise a SNAPSHOT_MODIFIED child is created.
    ///
    /// # Errors
    ///
    /// Returns `ENOTSUP` for memory mapped clocks, or a mapping errno when
    /// the child cannot be created.
    pub fn clone_memory(
        self: &Arc<Self>,
        rights: zx::Rights,
        options: MappingOptions,
    ) -> Result<Arc<Self>, Errno> {
        if self.is_clock() {
            return Err(errno!(ENOTSUP, "clone_memory not supported on memory mapped clock"));
        }

        // Non-anonymous memory is pager-backed, and we can clone it if we don't need write
        // rights.
        Ok(if !options.contains(MappingOptions::ANONYMOUS) && !rights.contains(zx::Rights::WRITE) {
            // Cheap path: clone the Arc, sharing the same memory object.
            self.clone()
        } else {
            trace_duration!(CATEGORY_STARNIX_MM, "pager_backed_memory_snapshot");
            let mut cloned_memory = self
                .create_child(
                    zx::VmoChildOptions::SNAPSHOT_MODIFIED | zx::VmoChildOptions::RESIZABLE,
                    0,
                    self.get_size(),
                )
                .map_err(MemoryManager::get_errno_for_map_err)?;
            // NOTE(review): EXECUTE is re-applied here, presumably because
            // the child does not inherit it — confirm against create_child
            // semantics.
            if rights.contains(zx::Rights::EXECUTE) {
                cloned_memory = cloned_memory
                    .replace_as_executable(&VMEX_RESOURCE)
                    .map_err(impossible_error)?;
            }

            Arc::new(cloned_memory)
        })
    }
505}