// starnix_core/mm/memory.rs
1// Copyright 2021 The Fuchsia Authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
5use crate::mm::{
6    MappingOptions, MemoryManager, PAGE_SIZE, VMEX_RESOURCE, ZX_VM_SPECIFIC_OVERWRITE,
7};
8use fuchsia_runtime::UtcClock;
9use mapped_clock::{CLOCK_SIZE, MappedClock};
10use starnix_logging::{impossible_error, set_zx_name};
11use starnix_uapi::errno;
12use starnix_uapi::errors::Errno;
13use std::mem::MaybeUninit;
14use std::sync::Arc;
15use zerocopy::FromBytes;
16use zx::{HandleBased, Koid};
17
/// A memory-like kernel object that can be mapped into a user address space.
///
/// Most operations are only meaningful for the VMO-backed variants; the other
/// variants return `zx::Status::NOT_SUPPORTED` (or panic, where documented)
/// from operations that do not apply to them.
#[derive(Debug)]
pub enum MemoryObject {
    /// A plain VMO; all operations are delegated directly to it.
    Vmo(zx::Vmo),
    /// The memory object is a bpf ring buffer. The layout it represents is:
    /// |Page1 - Page2 - Page3 .. PageN - Page3 .. PageN| where the vmo is
    /// |Page1 - Page2 - Page3 .. PageN|
    /// i.e. the data pages are mapped twice so the buffer can wrap around.
    RingBuf(zx::Vmo),
    /// A memory mapped clock is backed by kernel memory, not by a VMO. So
    /// it is handled specially.  The lifecycle of this clock is:
    /// - starts off as an empty unmapped thing.
    /// - a MappedClock is created on `map_in_vmar`.
    /// - a name is added on `set_zx_name`.
    /// - most clone/resize operations return errors.
    /// - unmapped at the end of the process lifetime.
    MemoryMappedClock {
        /// Koid of the `utc_clock`, cached for performance.
        koid: Koid,
        /// The UTC clock handle to map to memory. Do not use it for clock reads, use
        /// the public functions in `//src/starnix/kernel/core/time/utc.rs` instead
        utc_clock: UtcClock,
    },
}
40
41impl std::cmp::Eq for MemoryObject {}
42
43// Implemented manually as `MemoryMappedClock`'s mutex is not transparent to
44// `PartialEq`.
45impl std::cmp::PartialEq for MemoryObject {
46    fn eq(&self, other: &MemoryObject) -> bool {
47        match (self, other) {
48            (MemoryObject::Vmo(vmo1), MemoryObject::Vmo(vmo2)) => vmo1 == vmo2,
49            (MemoryObject::RingBuf(vmo1), MemoryObject::RingBuf(vmo2)) => vmo1 == vmo2,
50            (MemoryObject::MemoryMappedClock { .. }, MemoryObject::MemoryMappedClock { .. }) => {
51                self.get_koid() == other.get_koid()
52            }
53            (_, _) => false,
54        }
55    }
56}
57
58impl From<zx::Vmo> for MemoryObject {
59    fn from(vmo: zx::Vmo) -> Self {
60        Self::Vmo(vmo)
61    }
62}
63
64impl From<UtcClock> for MemoryObject {
65    fn from(utc_clock: UtcClock) -> MemoryObject {
66        let koid = utc_clock.koid().expect("koid should always be readable");
67        MemoryObject::MemoryMappedClock { koid, utc_clock }
68    }
69}
70
71impl MemoryObject {
72    pub fn as_vmo(&self) -> Option<&zx::Vmo> {
73        match self {
74            Self::Vmo(vmo) => Some(&vmo),
75            Self::RingBuf(_) | Self::MemoryMappedClock { .. } => None,
76        }
77    }
78
79    /// Returns true if this [MemoryObject] is a memory mapped clock.
80    pub fn is_clock(&self) -> bool {
81        match self {
82            Self::Vmo(_) | Self::RingBuf(_) => false,
83            Self::MemoryMappedClock { .. } => true,
84        }
85    }
86
87    pub fn into_vmo(self) -> Option<zx::Vmo> {
88        match self {
89            Self::Vmo(vmo) => Some(vmo),
90            Self::RingBuf(_) | Self::MemoryMappedClock { .. } => None,
91        }
92    }
93
94    pub fn get_content_size(&self) -> u64 {
95        match self {
96            Self::Vmo(vmo) => vmo.get_stream_size().map_err(impossible_error).unwrap(),
97            Self::RingBuf(_) => self.get_size(),
98            Self::MemoryMappedClock { .. } => CLOCK_SIZE as u64,
99        }
100    }
101
102    pub fn set_content_size(&self, size: u64) -> Result<(), zx::Status> {
103        match self {
104            Self::Vmo(vmo) => vmo.set_stream_size(size),
105            Self::RingBuf(_) => Ok(()),
106            Self::MemoryMappedClock { .. } => Err(zx::Status::NOT_SUPPORTED),
107        }
108    }
109
110    pub fn get_size(&self) -> u64 {
111        match self {
112            Self::Vmo(vmo) => vmo.get_size().map_err(impossible_error).unwrap(),
113            Self::RingBuf(vmo) => {
114                let base_size = vmo.get_size().map_err(impossible_error).unwrap();
115                (base_size - *PAGE_SIZE) * 2
116            }
117            Self::MemoryMappedClock { .. } => CLOCK_SIZE as u64,
118        }
119    }
120
121    pub fn set_size(&self, size: u64) -> Result<(), zx::Status> {
122        match self {
123            Self::Vmo(vmo) => vmo.set_size(size),
124            Self::RingBuf(_) | Self::MemoryMappedClock { .. } => Err(zx::Status::NOT_SUPPORTED),
125        }
126    }
127
128    pub fn create_child(
129        &self,
130        option: zx::VmoChildOptions,
131        offset: u64,
132        size: u64,
133    ) -> Result<Self, zx::Status> {
134        match self {
135            Self::Vmo(vmo) => vmo.create_child(option, offset, size).map(Self::from),
136            Self::RingBuf(vmo) => vmo.create_child(option, offset, size).map(Self::RingBuf),
137            Self::MemoryMappedClock { .. } => Err(zx::Status::NOT_SUPPORTED),
138        }
139    }
140
141    pub fn duplicate_handle(&self, rights: zx::Rights) -> Result<Self, zx::Status> {
142        match self {
143            Self::Vmo(vmo) => vmo.duplicate_handle(rights).map(Self::from),
144            Self::RingBuf(vmo) => vmo.duplicate_handle(rights).map(Self::RingBuf),
145            Self::MemoryMappedClock { utc_clock, .. } => {
146                utc_clock.duplicate_handle(rights).map(|c| Self::from(c))
147            }
148        }
149    }
150
151    pub fn read(&self, data: &mut [u8], offset: u64) -> Result<(), zx::Status> {
152        match self {
153            Self::Vmo(vmo) => vmo.read(data, offset),
154            Self::RingBuf(_) | Self::MemoryMappedClock { .. } => Err(zx::Status::NOT_SUPPORTED),
155        }
156    }
157
158    pub fn read_to_array<T: Copy + FromBytes, const N: usize>(
159        &self,
160        offset: u64,
161    ) -> Result<[T; N], zx::Status> {
162        match self {
163            Self::Vmo(vmo) => vmo.read_to_array(offset),
164            Self::RingBuf(_) => Err(zx::Status::NOT_SUPPORTED),
165            // There does not seem to be an API that allows this read.
166            Self::MemoryMappedClock { .. } => Err(zx::Status::NOT_SUPPORTED),
167        }
168    }
169
170    pub fn read_to_vec(&self, offset: u64, length: u64) -> Result<Vec<u8>, zx::Status> {
171        match self {
172            Self::Vmo(vmo) => vmo.read_to_vec(offset, length),
173            Self::RingBuf(_) => Err(zx::Status::NOT_SUPPORTED),
174            // See the note in `read_to_array` above.
175            Self::MemoryMappedClock { .. } => Err(zx::Status::NOT_SUPPORTED),
176        }
177    }
178
179    pub fn read_uninit<'a>(
180        &self,
181        data: &'a mut [MaybeUninit<u8>],
182        offset: u64,
183    ) -> Result<&'a mut [u8], zx::Status> {
184        match self {
185            Self::Vmo(vmo) => vmo.read_uninit(data, offset),
186            Self::RingBuf(_) => Err(zx::Status::NOT_SUPPORTED),
187            // See the note in `read_to_array` above.
188            Self::MemoryMappedClock { .. } => Err(zx::Status::NOT_SUPPORTED),
189        }
190    }
191
192    /// Reads from the memory.
193    ///
194    /// # Safety
195    ///
196    /// Callers must guarantee that the buffer is valid to write to.
197    ///
198    /// # Errors
199    ///
200    /// Returns `zx::Status::NOT_SUPPORTED` where unsupported.
201    pub unsafe fn read_raw(
202        &self,
203        buffer: *mut u8,
204        buffer_length: usize,
205        offset: u64,
206    ) -> Result<(), zx::Status> {
207        match self {
208            #[allow(clippy::undocumented_unsafe_blocks, reason = "2024 edition migration")]
209            Self::Vmo(vmo) => unsafe { vmo.read_raw(buffer, buffer_length, offset) },
210            Self::RingBuf(_) => Err(zx::Status::NOT_SUPPORTED),
211            // See the note in `read_to_array` above.
212            Self::MemoryMappedClock { .. } => Err(zx::Status::NOT_SUPPORTED),
213        }
214    }
215
216    /// Write to memory.
217    ///
218    /// # Errors
219    ///
220    /// Returns `zx::Status::NOT_SUPPORTED` for read-only memory.
221    pub fn write(&self, data: &[u8], offset: u64) -> Result<(), zx::Status> {
222        match self {
223            Self::Vmo(vmo) => vmo.write(data, offset),
224            Self::RingBuf(_) | Self::MemoryMappedClock { .. } => Err(zx::Status::NOT_SUPPORTED),
225        }
226    }
227
228    /// Returns the generic basic handle info.
229    pub fn basic_info(&self) -> zx::HandleBasicInfo {
230        match self {
231            Self::Vmo(vmo) | Self::RingBuf(vmo) => {
232                vmo.basic_info().map_err(impossible_error).unwrap()
233            }
234            Self::MemoryMappedClock { utc_clock, .. } => {
235                utc_clock.basic_info().map_err(impossible_error).unwrap()
236            }
237        }
238    }
239
240    /// Returns the koid of the underlying memory-like object.
241    ///
242    /// Should be cheap to call frequently.
243    pub fn get_koid(&self) -> Koid {
244        match self {
245            Self::Vmo(_) | Self::RingBuf(_) => self.basic_info().koid,
246            Self::MemoryMappedClock { koid, .. } => *koid,
247        }
248    }
249
250    /// Returns `zx::VmoInfo` for a memory object that supports it.
251    ///
252    /// # Panics
253    ///
254    /// Calling `info` on a `MemoryObject` that is not represented by a VMO
255    /// will panic. To avoid this in code, call `is_clock` before attempting.
256    pub fn info(&self) -> Result<zx::VmoInfo, Errno> {
257        match self {
258            Self::Vmo(vmo) | Self::RingBuf(vmo) => vmo.info().map_err(|_| errno!(EIO)),
259            // Use `is_clock` to avoid calling info on a clock.
260            Self::MemoryMappedClock { .. } => {
261                panic!("info() is not supported on a memory mapped clock")
262            }
263        }
264    }
265
266    pub fn set_zx_name(&self, name: &[u8]) {
267        match self {
268            Self::Vmo(vmo) | Self::RingBuf(vmo) => set_zx_name(vmo, name),
269            Self::MemoryMappedClock { .. } => {
270                // The memory mapped clock is a singleton, so it does not
271                // seem appropriate to give it a zx name.
272            }
273        }
274    }
275
276    pub fn with_zx_name(self, name: &[u8]) -> Self {
277        self.set_zx_name(name);
278        self
279    }
280
281    pub fn op_range(
282        &self,
283        op: zx::VmoOp,
284        mut offset: u64,
285        mut size: u64,
286    ) -> Result<(), zx::Status> {
287        match self {
288            Self::Vmo(vmo) => vmo.op_range(op, offset, size),
289            Self::RingBuf(vmo) => {
290                let vmo_size = vmo.get_size().map_err(impossible_error).unwrap();
291                let data_size = vmo_size - (2 * *PAGE_SIZE);
292                let memory_size = vmo_size + data_size;
293                if offset + size > memory_size {
294                    return Err(zx::Status::OUT_OF_RANGE);
295                }
296                // If `offset` is greater than `vmo_size`, the operation is equivalent to the one
297                // done on the first part of the memory range.
298                if offset >= vmo_size {
299                    offset -= data_size;
300                }
301                // If the operation spill over the end if the vmo, it must be done on the start of
302                // the data part of the vmo.
303                if offset + size > vmo_size {
304                    vmo.op_range(op, 2 * *PAGE_SIZE, offset + size - vmo_size)?;
305                    size = vmo_size - offset;
306                }
307                vmo.op_range(op, offset, size)
308            }
309            Self::MemoryMappedClock { .. } => Err(zx::Status::NOT_SUPPORTED),
310        }
311    }
312
313    pub fn replace_as_executable(self, vmex: &zx::Resource) -> Result<Self, zx::Status> {
314        match self {
315            Self::Vmo(vmo) => vmo.replace_as_executable(vmex).map(Self::from),
316            Self::RingBuf(_) | Self::MemoryMappedClock { .. } => Err(zx::Status::NOT_SUPPORTED),
317        }
318    }
319
320    pub fn map_in_vmar(
321        &self,
322        vmar: &zx::Vmar,
323        vmar_offset: usize,
324        mut memory_offset: u64,
325        len: usize,
326        flags: zx::VmarFlags,
327    ) -> Result<usize, zx::Status> {
328        match self {
329            Self::Vmo(vmo) => vmar.map(vmar_offset, vmo, memory_offset, len, flags),
330            Self::RingBuf(vmo) => {
331                let vmo_size = vmo.get_size().map_err(impossible_error).unwrap();
332                let data_size = vmo_size - (2 * *PAGE_SIZE);
333                let memory_size = vmo_size + data_size;
334                if memory_offset.checked_add(len as u64).ok_or(zx::Status::OUT_OF_RANGE)?
335                    > memory_size
336                {
337                    return Err(zx::Status::OUT_OF_RANGE);
338                }
339                // If `memory_offset` is greater than `vmo_size`, the operation is equivalent to
340                // the one done on the first part of the memory range.
341                if memory_offset >= vmo_size {
342                    memory_offset -= data_size;
343                }
344                // Map the vmo for the full length. This ensures the kernel will choose a range
345                // that can accommodate the full length so that the second mapping will not erase
346                // another mapping.
347                let result = vmar.map(
348                    vmar_offset,
349                    vmo,
350                    memory_offset,
351                    len,
352                    flags | zx::VmarFlags::ALLOW_FAULTS,
353                )?;
354                // The maximal amount of data that can have been mapped from the vmo with the
355                // previous operation.
356                let max_mapped_len = (vmo_size - memory_offset) as usize;
357                // If more data is needed, the data part of the vmo must be mapped again, replacing
358                // the part of the previous mapping that contained no data.
359                if len > max_mapped_len {
360                    let vmar_info = vmar.info().map_err(|_| errno!(EIO))?;
361                    let base_address = vmar_info.base;
362                    // The request should map the data part of the vmo a second time
363                    let second_mapping_address = vmar
364                        .map(
365                            result + max_mapped_len - base_address,
366                            vmo,
367                            2 * *PAGE_SIZE,
368                            len - max_mapped_len,
369                            flags | ZX_VM_SPECIFIC_OVERWRITE,
370                        )
371                        .expect("Mapping should not fail as the space has been reserved");
372                    debug_assert_eq!(second_mapping_address, result + max_mapped_len);
373                }
374                Ok(result)
375            }
376            Self::MemoryMappedClock { utc_clock, .. } => {
377                // The memory mapped clock API only allows memory offset of 0, and a page-sized
378                // length of the mapping. No offset or partial mappings are allowed.
379                assert_eq!(0, memory_offset, "memory mapped clock must be at memory offset 0");
380
381                // We don't need to remember this, since vmar will know how to unmap it.
382                let memory_mapped_clock = MappedClock::try_new_without_unmap(
383                    &utc_clock,
384                    vmar,
385                    flags,
386                    vmar_offset as u64,
387                )?;
388                Ok(memory_mapped_clock.raw_addr())
389            }
390        }
391    }
392
393    pub fn memmove(
394        &self,
395        options: zx::TransferDataOptions,
396        dst_offset: u64,
397        src_offset: u64,
398        size: u64,
399    ) -> Result<(), zx::Status> {
400        match self {
401            Self::Vmo(vmo) => vmo.transfer_data(options, dst_offset, size, vmo, src_offset),
402            Self::RingBuf(_) | Self::MemoryMappedClock { .. } => Err(zx::Status::NOT_SUPPORTED),
403        }
404    }
405
406    pub fn clone_memory(
407        self: &Arc<Self>,
408        rights: zx::Rights,
409        options: MappingOptions,
410    ) -> Result<Arc<Self>, Errno> {
411        if self.is_clock() {
412            return Err(errno!(ENOTSUP, "clone_memory not supported on memory mapped clock"));
413        }
414
415        // Non-anonymous memory is pager-backed, and we can clone it if we don't need write
416        // rights.
417        Ok(if !options.contains(MappingOptions::ANONYMOUS) && !rights.contains(zx::Rights::WRITE) {
418            self.clone()
419        } else {
420            let mut cloned_memory = self
421                .create_child(
422                    zx::VmoChildOptions::SNAPSHOT_MODIFIED | zx::VmoChildOptions::RESIZABLE,
423                    0,
424                    self.get_size(),
425                )
426                .map_err(MemoryManager::get_errno_for_map_err)?;
427            if rights.contains(zx::Rights::EXECUTE) {
428                cloned_memory = cloned_memory
429                    .replace_as_executable(&VMEX_RESOURCE)
430                    .map_err(impossible_error)?;
431            }
432
433            Arc::new(cloned_memory)
434        })
435    }
436}