use crate::mm::memory::MemoryObject;
use crate::mm::memory_manager::MemoryManagerState;
use crate::mm::{
    FaultRegisterMode, GUARD_PAGE_COUNT_FOR_GROWSDOWN_MAPPINGS, MappingOptions, PAGE_SIZE,
    ProtectionFlags,
};
use crate::vfs::FileMapping;
use crate::vfs::aio::AioContext;
use bitflags::bitflags;
use flyweights::FlyByteStr;
use fuchsia_inspect::HistogramProperty;
use starnix_uapi::errors::Errno;
use starnix_uapi::file_mode::Access;
use starnix_uapi::user_address::UserAddress;
use starnix_uapi::{PROT_EXEC, PROT_READ, PROT_WRITE, errno};
use static_assertions::const_assert_eq;
use std::mem::MaybeUninit;
use std::ops::Range;
use std::sync::Arc;

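/// A single memory mapping tracked by the memory manager: how it is backed, its flags, the
/// maximum access it may ever be granted, and the name reported for it.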
#[split_enum_storage::container]
#[must_use]
pub struct Mapping {
    /// How the mapping is backed: by a memory object or by private anonymous memory.
    backing: MappingBacking,

    /// The flags for this mapping, combining the protection bits and the mapping options.
    flags: MappingFlags,

    /// The maximum access this mapping may ever be granted.
    max_access: Access,

    /// The name of the mapping, if any.
    #[split_enum_storage::decomposed]
    name: MappingName,
}

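// Guard against accidental growth of `Mapping`; the size is only checked in release builds.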
#[cfg(not(any(test, debug_assertions)))]
static_assertions::assert_eq_size!(Mapping, [u8; 24]);

impl Mapping {
    pub fn new(backing: MappingBacking, flags: MappingFlags, max_access: Access) -> Mapping {
        Self::with_name(backing, flags, max_access, MappingName::None)
    }

    pub fn with_name(
        backing: MappingBacking,
        flags: MappingFlags,
        max_access: Access,
        name: MappingName,
    ) -> Mapping {
        MappingUnsplit { backing, flags, max_access, name }.decompose()
    }

    pub fn flags(&self) -> MappingFlags {
        self.flags
    }

    pub fn set_flags(&mut self, new_flags: MappingFlags) {
        self.flags = new_flags;
    }

    pub fn max_access(&self) -> Access {
        self.max_access
    }

    pub fn get_backing_internal(&self) -> &MappingBacking {
        &self.backing
    }

    pub fn set_backing_internal(&mut self, backing: MappingBacking) {
        self.backing = backing;
    }

    pub fn set_uffd(&mut self, mode: FaultRegisterMode) {
        self.flags |= MappingFlags::UFFD;
        if mode == FaultRegisterMode::MISSING {
            self.flags |= MappingFlags::UFFD_MISSING;
        }
    }

    pub fn clear_uffd(&mut self) {
        self.flags = self.flags.difference(MappingFlags::UFFD | MappingFlags::UFFD_MISSING);
    }

    pub fn set_mlock(&mut self) {
        self.flags |= MappingFlags::LOCKED;
    }

    pub fn clear_mlock(&mut self) {
        self.flags = self.flags.difference(MappingFlags::LOCKED);
    }

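    /// Creates a private anonymous mapping, which always allows the full rwx access range.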
    pub fn new_private_anonymous(flags: MappingFlags, name: MappingName) -> Mapping {
        MappingUnsplit {
            backing: MappingBacking::PrivateAnonymous,
            flags,
            max_access: Access::rwx(),
            name,
        }
        .decompose()
    }

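    /// Returns `range` extended downward to cover the guard pages placed below a GROWSDOWN
    /// mapping. For mappings without the GROWSDOWN flag, the range is returned unchanged.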
    pub fn inflate_to_include_guard_pages(&self, range: &Range<UserAddress>) -> Range<UserAddress> {
        let start = if self.flags.contains(MappingFlags::GROWSDOWN) {
            range
                .start
                .saturating_sub(*PAGE_SIZE as usize * GUARD_PAGE_COUNT_FOR_GROWSDOWN_MAPPINGS)
        } else {
            range.start
        };
        start..range.end
    }

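    /// Converts a user address within this mapping to an offset into its backing memory.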
    pub fn address_to_offset(&self, addr: UserAddress) -> u64 {
        match &self.backing {
            MappingBacking::Memory(backing) => backing.address_to_offset(addr),
            MappingBacking::PrivateAnonymous => {
                // Private anonymous mappings use the address itself as the offset.
                addr.ptr() as u64
            }
        }
    }

    pub fn can_read(&self) -> bool {
        self.flags.contains(MappingFlags::READ)
    }

    pub fn can_write(&self) -> bool {
        self.flags.contains(MappingFlags::WRITE)
    }

    pub fn can_exec(&self) -> bool {
        self.flags.contains(MappingFlags::EXEC)
    }

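    /// Reports whether this mapping is both private and anonymous, either from its backing or
    /// from its flags.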
    pub fn private_anonymous(&self) -> bool {
        if let MappingBacking::PrivateAnonymous = &self.backing {
            return true;
        }
        !self.flags.contains(MappingFlags::SHARED) && self.flags.contains(MappingFlags::ANONYMOUS)
    }

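    /// Renders this mapping's flags as the two-letter codes used by the `VmFlags` field of
    /// `/proc/<pid>/smaps`.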
    pub fn vm_flags(&self) -> String {
        let mut string = String::default();
        if self.flags.contains(MappingFlags::READ) {
            string.push_str("rd ");
        }
        if self.flags.contains(MappingFlags::WRITE) {
            string.push_str("wr ");
        }
        if self.flags.contains(MappingFlags::EXEC) {
            string.push_str("ex ");
        }
        if self.flags.contains(MappingFlags::SHARED) && self.max_access.contains(Access::WRITE) {
            string.push_str("sh ");
        }
        if self.max_access.contains(Access::READ) {
            string.push_str("mr ");
        }
        if self.max_access.contains(Access::WRITE) {
            string.push_str("mw ");
        }
        if self.max_access.contains(Access::EXEC) {
            string.push_str("me ");
        }
        if self.flags.contains(MappingFlags::SHARED) {
            string.push_str("ms ");
        }
        if self.flags.contains(MappingFlags::GROWSDOWN) {
            string.push_str("gd ");
        }
        if self.flags.contains(MappingFlags::LOCKED) {
            string.push_str("lo ");
        }
        if self.flags.contains(MappingFlags::DONTFORK) {
            string.push_str("dc ");
        }
        if self.flags.contains(MappingFlags::DONT_EXPAND) {
            string.push_str("de ");
        }
        // The "accountable" flag is always reported.
        string.push_str("ac ");
        if self.flags.contains(MappingFlags::WIPEONFORK) {
            string.push_str("wf ");
        }
        if self.flags.contains(MappingFlags::UFFD_MISSING) {
            string.push_str("um");
        }
        string
    }
}

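/// How a `Mapping` is backed.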
#[derive(Debug, Eq, PartialEq, Clone)]
pub enum MappingBacking {
    /// The mapping is backed by a memory object at some offset.
    Memory(Box<MappingBackingMemory>),

    /// The mapping is backed by private anonymous memory.
    PrivateAnonymous,
}

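/// The name of a `Mapping`, used when reporting the address space (e.g. in `/proc/<pid>/maps`).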
#[derive(Debug, Eq, PartialEq, Clone, split_enum_storage::SplitStorage)]
pub enum MappingName {
    /// The mapping has no name.
    None,

    /// The stack.
    Stack,

    /// The program heap.
    Heap,

    /// The vDSO.
    Vdso,

    /// The vvar pages associated with the vDSO.
    Vvar,

    /// A mapping backed by a file.
    File(Arc<FileMapping>),

    /// A name assigned to an anonymous mapping (e.g. via `prctl(PR_SET_VMA_ANON_NAME)`).
    Vma(FlyByteStr),

    /// The name of an ashmem region.
    Ashmem(FlyByteStr),

    /// A mapping belonging to an AIO context.
    AioContext(Arc<AioContext>),
}

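/// A mapping's view into the memory object that backs it.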
#[derive(Debug, Eq, PartialEq, Clone)]
pub struct MappingBackingMemory {
    /// The memory object that backs this mapping.
    memory: Arc<MemoryObject>,

    /// Added (wrapping) to a user address to produce the corresponding offset into `memory`.
    address_to_offset_delta: u64,
}

impl MappingBackingMemory {
    pub fn new(base: UserAddress, memory: Arc<MemoryObject>, memory_offset: u64) -> Self {
        let address_to_offset_delta = memory_offset.wrapping_sub(base.ptr() as u64);
        Self { memory, address_to_offset_delta }
    }

    pub fn memory(&self) -> &Arc<MemoryObject> {
        &self.memory
    }

    /// Reads exactly `bytes.len()` bytes from the mapping starting at `addr`, returning the
    /// initialized slice.
    pub fn read_memory<'a>(
        &self,
        addr: UserAddress,
        bytes: &'a mut [MaybeUninit<u8>],
    ) -> Result<&'a mut [u8], Errno> {
        self.memory.read_uninit(bytes, self.address_to_offset(addr)).map_err(|_| errno!(EFAULT))
    }

    /// Writes `bytes` to the mapping starting at `addr`.
    pub fn write_memory(&self, addr: UserAddress, bytes: &[u8]) -> Result<(), Errno> {
        self.memory.write(bytes, self.address_to_offset(addr)).map_err(|_| errno!(EFAULT))
    }

    /// Zeroes `length` bytes of the mapping starting at `addr`.
    pub fn zero(&self, addr: UserAddress, length: usize) -> Result<usize, Errno> {
        self.memory
            .op_range(zx::VmoOp::ZERO, self.address_to_offset(addr), length as u64)
            .map_err(|_| errno!(EFAULT))?;
        Ok(length)
    }

    /// Converts a user address within the mapping to an offset into the backing memory object.
    pub fn address_to_offset(&self, addr: UserAddress) -> u64 {
        (addr.ptr() as u64).wrapping_add(self.address_to_offset_delta)
    }
}

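// The flags tracked for each mapping. The low three bits mirror PROT_READ/PROT_WRITE/PROT_EXEC,
// and the following bits mirror `MappingOptions` shifted left by 3, as the assertions below check.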
bitflags! {
    #[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
    #[rustfmt::skip]
    pub struct MappingFlags: u16 {
        const READ = 1 << 0;
        const WRITE = 1 << 1;
        const EXEC = 1 << 2;
        const SHARED = 1 << 3;
        const ANONYMOUS = 1 << 4;
        const LOWER_32BIT = 1 << 5;
        const GROWSDOWN = 1 << 6;
        const ELF_BINARY = 1 << 7;
        const DONTFORK = 1 << 8;
        const WIPEONFORK = 1 << 9;
        const DONT_SPLIT = 1 << 10;
        const DONT_EXPAND = 1 << 11;
        const LOCKED = 1 << 12;
        const UFFD = 1 << 13;
        const UFFD_MISSING = 1 << 14;
    }
}

const_assert_eq!(MappingFlags::READ.bits(), PROT_READ as u16);
const_assert_eq!(MappingFlags::WRITE.bits(), PROT_WRITE as u16);
const_assert_eq!(MappingFlags::EXEC.bits(), PROT_EXEC as u16);

const_assert_eq!(MappingFlags::SHARED.bits(), MappingOptions::SHARED.bits() << 3);
const_assert_eq!(MappingFlags::ANONYMOUS.bits(), MappingOptions::ANONYMOUS.bits() << 3);
const_assert_eq!(MappingFlags::LOWER_32BIT.bits(), MappingOptions::LOWER_32BIT.bits() << 3);
const_assert_eq!(MappingFlags::GROWSDOWN.bits(), MappingOptions::GROWSDOWN.bits() << 3);
const_assert_eq!(MappingFlags::ELF_BINARY.bits(), MappingOptions::ELF_BINARY.bits() << 3);
const_assert_eq!(MappingFlags::DONTFORK.bits(), MappingOptions::DONTFORK.bits() << 3);
const_assert_eq!(MappingFlags::WIPEONFORK.bits(), MappingOptions::WIPEONFORK.bits() << 3);
const_assert_eq!(MappingFlags::DONT_SPLIT.bits(), MappingOptions::DONT_SPLIT.bits() << 3);
const_assert_eq!(MappingFlags::DONT_EXPAND.bits(), MappingOptions::DONT_EXPAND.bits() << 3);

impl MappingFlags {
    /// Returns the protection bits (read/write/exec) of these flags.
    pub fn access_flags(&self) -> ProtectionFlags {
        ProtectionFlags::from_bits_truncate(
            self.bits() as u32 & ProtectionFlags::ACCESS_FLAGS.bits(),
        )
    }

    /// Returns these flags with the protection bits replaced by `prot_flags`.
    pub fn with_access_flags(&self, prot_flags: ProtectionFlags) -> Self {
        let mapping_flags =
            *self & (MappingFlags::READ | MappingFlags::WRITE | MappingFlags::EXEC).complement();
        mapping_flags | Self::from_bits_truncate(prot_flags.access_flags().bits() as u16)
    }

    /// Returns the `MappingOptions` encoded in these flags.
    pub fn options(&self) -> MappingOptions {
        MappingOptions::from_bits_truncate(self.bits() >> 3)
    }

    /// Combines protection flags and mapping options into a `MappingFlags` value.
    pub fn from_access_flags_and_options(
        prot_flags: ProtectionFlags,
        options: MappingOptions,
    ) -> Self {
        Self::from_bits_truncate(prot_flags.access_flags().bits() as u16)
            | Self::from_bits_truncate(options.bits() << 3)
    }
}

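/// Per-kind counts of the mappings in an address space, recorded into an Inspect node.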
#[derive(Debug, Default)]
pub struct MappingSummary {
    no_kind: MappingKindSummary,
    stack: MappingKindSummary,
    heap: MappingKindSummary,
    vdso: MappingKindSummary,
    vvar: MappingKindSummary,
    file: MappingKindSummary,
    vma: MappingKindSummary,
    ashmem: MappingKindSummary,
    aiocontext: MappingKindSummary,

    /// Lengths of the names of `Vma` and `Ashmem` mappings, recorded as a histogram.
    name_lengths: Vec<usize>,
}

impl MappingSummary {
    /// Accounts for `mapping` in the summary.
    pub fn add(&mut self, mm_state: &MemoryManagerState, mapping: &Mapping) {
        let kind_summary = match mapping.name() {
            MappingName::None => &mut self.no_kind,
            MappingName::Stack => &mut self.stack,
            MappingName::Heap => &mut self.heap,
            MappingName::Vdso => &mut self.vdso,
            MappingName::Vvar => &mut self.vvar,
            MappingName::File(_) => &mut self.file,
            MappingName::Vma(name) => {
                self.name_lengths.push(name.len());
                &mut self.vma
            }
            MappingName::Ashmem(name) => {
                self.name_lengths.push(name.len());
                &mut self.ashmem
            }
            MappingName::AioContext(_) => &mut self.aiocontext,
        };

        kind_summary.count += 1;
        if mapping.flags.contains(MappingFlags::SHARED) {
            kind_summary.num_shared += 1;
        } else {
            kind_summary.num_private += 1;
        }
        match mm_state.get_mapping_backing(mapping) {
            MappingBacking::Memory(_) => {
                kind_summary.num_memory_objects += 1;
            }
            MappingBacking::PrivateAnonymous => kind_summary.num_private_anon += 1,
        }
    }

    /// Records the summary into the given Inspect node.
    pub fn record(self, node: &fuchsia_inspect::Node) {
        node.record_child("no_kind", |node| self.no_kind.record(node));
        node.record_child("stack", |node| self.stack.record(node));
        node.record_child("heap", |node| self.heap.record(node));
        node.record_child("vdso", |node| self.vdso.record(node));
        node.record_child("vvar", |node| self.vvar.record(node));
        node.record_child("file", |node| self.file.record(node));
        node.record_child("vma", |node| self.vma.record(node));
        node.record_child("ashmem", |node| self.ashmem.record(node));
        node.record_child("aiocontext", |node| self.aiocontext.record(node));

        let name_lengths = node.create_uint_linear_histogram(
            "name_lengths",
            fuchsia_inspect::LinearHistogramParams { floor: 0, step_size: 8, buckets: 4 },
        );
        for l in self.name_lengths {
            name_lengths.insert(l as u64);
        }
        node.record(name_lengths);
    }
}

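/// Counts for a single kind of mapping, as recorded by `MappingSummary`.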
#[derive(Debug, Default)]
struct MappingKindSummary {
    count: u64,
    num_private: u64,
    num_shared: u64,
    num_memory_objects: u64,
    num_private_anon: u64,
}

impl MappingKindSummary {
    fn record(&self, node: &fuchsia_inspect::Node) {
        node.record_uint("count", self.count);
        node.record_uint("num_private", self.num_private);
        node.record_uint("num_shared", self.num_shared);
        node.record_uint("num_memory_objects", self.num_memory_objects);
        node.record_uint("num_private_anon", self.num_private_anon);
    }
}