1use crate::mm::memory::MemoryObject;
6use crate::mm::memory_manager::MemoryManagerState;
7use crate::mm::{
8 FaultRegisterMode, GUARD_PAGE_COUNT_FOR_GROWSDOWN_MAPPINGS, MappingOptions, PAGE_SIZE,
9 ProtectionFlags,
10};
11use crate::vfs::FileMapping;
12use crate::vfs::aio::AioContext;
13use bitflags::bitflags;
14use flyweights::FlyByteStr;
15use fuchsia_inspect::HistogramProperty;
16use starnix_uapi::errors::Errno;
17use starnix_uapi::file_mode::Access;
18use starnix_uapi::user_address::UserAddress;
19use starnix_uapi::{PROT_EXEC, PROT_READ, PROT_WRITE, errno};
20use static_assertions::const_assert_eq;
21use std::mem::MaybeUninit;
22use std::ops::Range;
23use std::sync::Arc;
24
/// Whether a mapping is materialized in the VMAR at creation time.
///
/// `Mapping::with_name` translates this into the `MAPPED_IN_VMAR` flag bit.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum MappingMode {
    /// The mapping is mapped into the VMAR immediately
    /// (`MappingFlags::MAPPED_IN_VMAR` is set).
    Eager,
    /// The mapping is not (yet) present in the VMAR.
    Lazy,
}
30
/// A single mapping in a process address space.
///
/// NOTE(review): the `split_enum_storage` container macro decomposes the
/// `name` field into separate storage, so the concrete layout is
/// macro-generated (see the 24-byte size assertion below).
#[split_enum_storage::container] #[must_use]
pub struct Mapping {
    /// Where the mapped bytes come from: a memory object, or private
    /// anonymous memory.
    backing: MappingBacking,

    /// Protection bits, mapping options and bookkeeping flags.
    flags: MappingFlags,

    /// Upper bound on the access this mapping may ever be granted —
    /// presumably derived from the backing file's open mode; confirm at the
    /// call sites that construct mappings.
    max_access: Access,

    /// Human-meaningful identity of the mapping (stack, heap, file, ...);
    /// stored decomposed by the container macro.
    #[split_enum_storage::decomposed]
    name: MappingName,
}
63
// Guard against accidental growth of `Mapping`: in release builds (the cfg
// excludes test and debug-assertion builds) the decomposed struct must stay
// exactly 24 bytes.
#[cfg(not(any(test, debug_assertions)))]
static_assertions::assert_eq_size!(Mapping, [u8; 24]);
68
69impl Mapping {
70 pub fn new(
71 backing: MappingBacking,
72 flags: MappingFlags,
73 max_access: Access,
74 mode: MappingMode,
75 ) -> Mapping {
76 Self::with_name(backing, flags, max_access, MappingName::None, mode)
77 }
78
79 pub fn with_name(
80 backing: MappingBacking,
81 mut flags: MappingFlags,
82 max_access: Access,
83 name: MappingName,
84 mode: MappingMode,
85 ) -> Mapping {
86 flags.set(MappingFlags::MAPPED_IN_VMAR, mode == MappingMode::Eager);
87 MappingUnsplit { backing, flags, max_access, name }.decompose()
88 }
89
90 pub fn flags(&self) -> MappingFlags {
91 self.flags
92 }
93
94 pub fn mapping_mode(&self) -> MappingMode {
95 if self.flags.contains(MappingFlags::MAPPED_IN_VMAR) {
96 MappingMode::Eager
97 } else {
98 MappingMode::Lazy
99 }
100 }
101
102 pub fn set_mapping_mode(&mut self, mode: MappingMode) {
103 self.flags.set(MappingFlags::MAPPED_IN_VMAR, mode == MappingMode::Eager);
104 }
105
106 pub fn set_flags(&mut self, new_flags: MappingFlags) {
107 self.flags = new_flags;
108 }
109
110 pub fn max_access(&self) -> Access {
111 self.max_access
112 }
113
114 pub fn get_backing_internal(&self) -> &MappingBacking {
115 &self.backing
116 }
117
118 pub fn set_backing_internal(&mut self, backing: MappingBacking) {
119 self.backing = backing;
120 }
121
122 pub fn set_uffd(&mut self, mode: FaultRegisterMode) {
123 self.flags |= MappingFlags::UFFD;
124 if mode == FaultRegisterMode::MISSING {
125 self.flags |= MappingFlags::UFFD_MISSING;
126 }
127 }
128
129 pub fn clear_uffd(&mut self) {
130 self.flags = self.flags.difference(MappingFlags::UFFD | MappingFlags::UFFD_MISSING);
131 }
132
133 pub fn set_mlock(&mut self) {
134 self.flags |= MappingFlags::LOCKED;
135 }
136
137 pub fn clear_mlock(&mut self) {
138 self.flags = self.flags.difference(MappingFlags::LOCKED);
139 }
140
141 pub fn new_private_anonymous(
142 mut flags: MappingFlags,
143 name: MappingName,
144 mode: MappingMode,
145 ) -> Mapping {
146 flags.set(MappingFlags::MAPPED_IN_VMAR, mode == MappingMode::Eager);
147 MappingUnsplit {
148 backing: MappingBacking::PrivateAnonymous,
149 flags,
150 max_access: Access::rwx(),
151 name,
152 }
153 .decompose()
154 }
155
156 pub fn inflate_to_include_guard_pages(&self, range: &Range<UserAddress>) -> Range<UserAddress> {
157 let start = if self.flags.contains(MappingFlags::GROWSDOWN) {
158 range
159 .start
160 .saturating_sub(*PAGE_SIZE as usize * GUARD_PAGE_COUNT_FOR_GROWSDOWN_MAPPINGS)
161 } else {
162 range.start
163 };
164 start..range.end
165 }
166
167 pub fn address_to_offset(&self, addr: UserAddress) -> u64 {
169 match &self.backing {
170 MappingBacking::Memory(backing) => backing.address_to_offset(addr),
171 MappingBacking::PrivateAnonymous => {
172 addr.ptr() as u64
174 }
175 }
176 }
177
178 pub fn can_read(&self) -> bool {
179 self.flags.contains(MappingFlags::READ)
180 }
181
182 pub fn can_write(&self) -> bool {
183 self.flags.contains(MappingFlags::WRITE)
184 }
185
186 pub fn can_exec(&self) -> bool {
187 self.flags.contains(MappingFlags::EXEC)
188 }
189
190 pub fn private_anonymous(&self) -> bool {
191 if let MappingBacking::PrivateAnonymous = &self.backing {
192 return true;
193 }
194 !self.flags.contains(MappingFlags::SHARED) && self.flags.contains(MappingFlags::ANONYMOUS)
195 }
196
197 pub fn vm_flags(&self) -> String {
198 let mut string = String::default();
199 if self.flags.contains(MappingFlags::READ) {
203 string.push_str("rd ");
204 }
205 if self.flags.contains(MappingFlags::WRITE) {
207 string.push_str("wr ");
208 }
209 if self.flags.contains(MappingFlags::EXEC) {
211 string.push_str("ex ");
212 }
213 if self.flags.contains(MappingFlags::SHARED) && self.max_access.contains(Access::WRITE) {
215 string.push_str("sh ");
216 }
217 if self.max_access.contains(Access::READ) {
219 string.push_str("mr ");
220 }
221 if self.max_access.contains(Access::WRITE) {
223 string.push_str("mw ");
224 }
225 if self.max_access.contains(Access::EXEC) {
227 string.push_str("me ");
228 }
229 if self.flags.contains(MappingFlags::SHARED) {
231 string.push_str("ms ");
232 }
233 if self.flags.contains(MappingFlags::GROWSDOWN) {
235 string.push_str("gd ");
236 }
237 if self.flags.contains(MappingFlags::LOCKED) {
241 string.push_str("lo ");
242 }
243 if self.flags.contains(MappingFlags::DONTFORK) {
248 string.push_str("dc ");
249 }
250 if self.flags.contains(MappingFlags::DONT_EXPAND) {
252 string.push_str("de ");
253 }
254 string.push_str("ac ");
256 if self.flags.contains(MappingFlags::WIPEONFORK) {
263 string.push_str("wf ");
264 }
265 if self.flags.contains(MappingFlags::UFFD_MISSING) {
273 string.push_str("um");
274 }
275 string
278 }
279}
280
/// The backing store of a `Mapping`.
#[derive(Debug, Eq, PartialEq, Clone)]
pub enum MappingBacking {
    /// Backed by a memory object; boxed to keep this enum small.
    Memory(Box<MappingBackingMemory>),

    /// Private anonymous memory with no dedicated backing object here.
    PrivateAnonymous,
}
287
/// The identity of a mapping, as surfaced e.g. in /proc-style listings.
///
/// Stored decomposed inside `Mapping` via `split_enum_storage`.
#[derive(Debug, Eq, PartialEq, Clone, split_enum_storage::SplitStorage)]
pub enum MappingName {
    /// No name.
    None,

    /// The process stack.
    Stack,

    /// The process heap.
    Heap,

    /// The vDSO region.
    Vdso,

    /// The vvar region.
    Vvar,

    /// A file-backed mapping.
    File(Arc<FileMapping>),

    /// A name set for an anonymous region (presumably via
    /// PR_SET_VMA_ANON_NAME — confirm at the call site).
    Vma(FlyByteStr),

    /// An ashmem region with the given name.
    Ashmem(FlyByteStr),

    /// An AIO control-block region.
    AioContext(Arc<AioContext>),
}
321
/// State for a mapping backed by a `MemoryObject`.
#[derive(Debug, Eq, PartialEq, Clone)]
pub struct MappingBackingMemory {
    /// The memory object providing the mapped pages.
    memory: Arc<MemoryObject>,

    /// Wrapping delta such that `offset = address + delta` (mod 2^64);
    /// computed in `new()` as `memory_offset - base`.
    address_to_offset_delta: u64,
}
330
331impl MappingBackingMemory {
332 pub fn new(base: UserAddress, memory: Arc<MemoryObject>, memory_offset: u64) -> Self {
333 let address_to_offset_delta = memory_offset.wrapping_sub(base.ptr() as u64);
334 Self { memory, address_to_offset_delta }
335 }
336
337 pub fn memory(&self) -> &Arc<MemoryObject> {
338 &self.memory
339 }
340
341 pub fn read_memory<'a>(
347 &self,
348 addr: UserAddress,
349 bytes: &'a mut [MaybeUninit<u8>],
350 ) -> Result<&'a mut [u8], Errno> {
351 self.memory.read_uninit(bytes, self.address_to_offset(addr)).map_err(|_| errno!(EFAULT))
352 }
353
354 pub fn write_memory(&self, addr: UserAddress, bytes: &[u8]) -> Result<(), Errno> {
360 self.memory.write(bytes, self.address_to_offset(addr)).map_err(|_| errno!(EFAULT))
361 }
362
363 pub fn zero(&self, addr: UserAddress, length: usize) -> Result<usize, Errno> {
364 self.memory
365 .op_range(zx::VmoOp::ZERO, self.address_to_offset(addr), length as u64)
366 .map_err(|_| errno!(EFAULT))?;
367 Ok(length)
368 }
369
370 pub fn address_to_offset(&self, addr: UserAddress) -> u64 {
372 (addr.ptr() as u64).wrapping_add(self.address_to_offset_delta)
373 }
374}
375
376bitflags! {
377 #[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
378 #[rustfmt::skip] pub struct MappingFlags: u16 {
380 const READ = 1 << 0; const WRITE = 1 << 1; const EXEC = 1 << 2; const SHARED = 1 << 3;
384 const ANONYMOUS = 1 << 4;
385 const LOWER_32BIT = 1 << 5;
386 const GROWSDOWN = 1 << 6;
387 const ELF_BINARY = 1 << 7;
388 const DONTFORK = 1 << 8;
389 const WIPEONFORK = 1 << 9;
390 const DONT_SPLIT = 1 << 10;
391 const DONT_EXPAND = 1 << 11;
392 const LOCKED = 1 << 12;
393 const UFFD = 1 << 13;
394 const UFFD_MISSING = 1 << 14;
395 const MAPPED_IN_VMAR = 1 << 15;
396 }
397}
398
// The low three `MappingFlags` bits must coincide with the PROT_* constants
// so protection bits can be carried over without translation.
const_assert_eq!(MappingFlags::READ.bits(), PROT_READ as u16);
const_assert_eq!(MappingFlags::WRITE.bits(), PROT_WRITE as u16);
const_assert_eq!(MappingFlags::EXEC.bits(), PROT_EXEC as u16);

// `MappingOptions` occupies `MappingFlags` bits shifted left by 3. These
// checks pin the layout that `MappingFlags::options()` and
// `MappingFlags::from_access_flags_and_options()` rely on.
const_assert_eq!(MappingFlags::SHARED.bits(), MappingOptions::SHARED.bits() << 3);
const_assert_eq!(MappingFlags::ANONYMOUS.bits(), MappingOptions::ANONYMOUS.bits() << 3);
const_assert_eq!(MappingFlags::LOWER_32BIT.bits(), MappingOptions::LOWER_32BIT.bits() << 3);
const_assert_eq!(MappingFlags::GROWSDOWN.bits(), MappingOptions::GROWSDOWN.bits() << 3);
const_assert_eq!(MappingFlags::ELF_BINARY.bits(), MappingOptions::ELF_BINARY.bits() << 3);
const_assert_eq!(MappingFlags::DONTFORK.bits(), MappingOptions::DONTFORK.bits() << 3);
const_assert_eq!(MappingFlags::WIPEONFORK.bits(), MappingOptions::WIPEONFORK.bits() << 3);
const_assert_eq!(MappingFlags::DONT_SPLIT.bits(), MappingOptions::DONT_SPLIT.bits() << 3);
const_assert_eq!(MappingFlags::DONT_EXPAND.bits(), MappingOptions::DONT_EXPAND.bits() << 3);
414
415impl MappingFlags {
416 pub fn access_flags(&self) -> ProtectionFlags {
417 ProtectionFlags::from_bits_truncate(
418 self.bits() as u32 & ProtectionFlags::ACCESS_FLAGS.bits(),
419 )
420 }
421
422 pub fn with_access_flags(&self, prot_flags: ProtectionFlags) -> Self {
423 let mapping_flags =
424 *self & (MappingFlags::READ | MappingFlags::WRITE | MappingFlags::EXEC).complement();
425 mapping_flags | Self::from_bits_truncate(prot_flags.access_flags().bits() as u16)
426 }
427
428 pub fn options(&self) -> MappingOptions {
429 MappingOptions::from_bits_truncate(self.bits() >> 3)
430 }
431
432 pub fn from_access_flags_and_options(
433 prot_flags: ProtectionFlags,
434 options: MappingOptions,
435 ) -> Self {
436 Self::from_bits_truncate(prot_flags.access_flags().bits() as u16)
437 | Self::from_bits_truncate(options.bits() << 3)
438 }
439}
440
/// Aggregated per-kind statistics over a set of mappings, recorded to
/// Inspect by `record()`.
#[derive(Debug, Default)]
pub struct MappingSummary {
    // One bucket of counters per `MappingName` kind.
    no_kind: MappingKindSummary,
    stack: MappingKindSummary,
    heap: MappingKindSummary,
    vdso: MappingKindSummary,
    vvar: MappingKindSummary,
    file: MappingKindSummary,
    vma: MappingKindSummary,
    ashmem: MappingKindSummary,
    aiocontext: MappingKindSummary,

    // Lengths of Vma/Ashmem names, recorded as a histogram.
    name_lengths: Vec<usize>,
}
455
456impl MappingSummary {
457 pub fn add(&mut self, mm_state: &MemoryManagerState, mapping: &Mapping) {
458 let kind_summary = match mapping.name() {
459 MappingNameRef::None => &mut self.no_kind,
460 MappingNameRef::Stack => &mut self.stack,
461 MappingNameRef::Heap => &mut self.heap,
462 MappingNameRef::Vdso => &mut self.vdso,
463 MappingNameRef::Vvar => &mut self.vvar,
464 MappingNameRef::File(_) => &mut self.file,
465 MappingNameRef::Vma(name) => {
466 self.name_lengths.push(name.len());
467 &mut self.vma
468 }
469 MappingNameRef::Ashmem(name) => {
470 self.name_lengths.push(name.len());
471 &mut self.ashmem
472 }
473 MappingNameRef::AioContext(_) => &mut self.aiocontext,
474 };
475
476 kind_summary.count += 1;
477 if mapping.flags.contains(MappingFlags::SHARED) {
478 kind_summary.num_shared += 1;
479 } else {
480 kind_summary.num_private += 1;
481 }
482 match mm_state.get_mapping_backing(mapping) {
483 MappingBacking::Memory(_) => {
484 kind_summary.num_memory_objects += 1;
485 }
486 MappingBacking::PrivateAnonymous => kind_summary.num_private_anon += 1,
487 }
488 }
489
490 pub fn record(self, node: &fuchsia_inspect::Node) {
491 node.record_child("no_kind", |node| self.no_kind.record(node));
492 node.record_child("stack", |node| self.stack.record(node));
493 node.record_child("heap", |node| self.heap.record(node));
494 node.record_child("vdso", |node| self.vdso.record(node));
495 node.record_child("vvar", |node| self.vvar.record(node));
496 node.record_child("file", |node| self.file.record(node));
497 node.record_child("vma", |node| self.vma.record(node));
498 node.record_child("ashmem", |node| self.ashmem.record(node));
499 node.record_child("aiocontext", |node| self.aiocontext.record(node));
500
501 let name_lengths = node.create_uint_linear_histogram(
502 "name_lengths",
503 fuchsia_inspect::LinearHistogramParams { floor: 0, step_size: 8, buckets: 4 },
504 );
505 for l in self.name_lengths {
506 name_lengths.insert(l as u64);
507 }
508 node.record(name_lengths);
509 }
510}
511
/// Counters for one kind of mapping (stack, heap, file, ...).
#[derive(Debug, Default)]
struct MappingKindSummary {
    // Total number of mappings of this kind.
    count: u64,
    // Split of `count` by the SHARED flag.
    num_private: u64,
    num_shared: u64,
    // Split of `count` by backing variant.
    num_memory_objects: u64,
    num_private_anon: u64,
}
520
521impl MappingKindSummary {
522 fn record(&self, node: &fuchsia_inspect::Node) {
523 node.record_uint("count", self.count);
524 node.record_uint("num_private", self.num_private);
525 node.record_uint("num_shared", self.num_shared);
526 node.record_uint("num_memory_objects", self.num_memory_objects);
527 node.record_uint("num_private_anon", self.num_private_anon);
528 }
529}