// starnix_core/mm/map_info_cache.rs
// Copyright 2025 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

use crate::task::CurrentTask;
use memory_pinning::ShadowProcess;
use page_buf::PageBuf;
use starnix_sync::Mutex;
use starnix_uapi::errors::Errno;
use starnix_uapi::from_status_like_fdio;
use std::sync::Arc;

/// A singleton cache that can be used to share pinned pages across all map info queries. These
/// queries often generate lock contention in Zircon on Starnix's shared address space when the
/// buffer pages are faulted in. This type holds a long-running allocation in pinned memory to
/// get reduced Zircon VmAspace lock contention in exchange for some extra memory usage and
/// potential lock contention for /proc/pid/status and related files. Luckily it seems that
/// in practice files like /proc/pid/status are not regularly accessed concurrently.
pub struct MapInfoCache {
    // Reusable buffer for `zx::MapInfo` entries; the Mutex serializes concurrent
    // map-info queries (expected to be rare in practice — see the type docs above).
    buf: Mutex<PageBuf<zx::MapInfo>>,
}
22
/// Zircon object name applied to both the cache's shadow process and its backing buffer,
/// so the cache's memory is identifiable in diagnostics.
const ZIRCON_NAME: zx::Name = zx::Name::new_lossy("starnix_zx_map_info_cache");
24
25impl MapInfoCache {
26    pub fn get_or_init(current_task: &CurrentTask) -> Result<Arc<Self>, Errno> {
27        let kernel = current_task.kernel();
28        kernel.expando.get_or_try_init(|| {
29            let pinned_shadow_process = kernel.expando.get_or_try_init(|| {
30                ShadowProcess::new(ZIRCON_NAME)
31                    .map(InfoCacheShadowProcess)
32                    .map_err(|e| from_status_like_fdio!(e))
33            })?;
34
35            let num_cache_elements = kernel.features.cached_zx_map_info_bytes as usize
36                / std::mem::size_of::<zx::MapInfo>();
37            Self::new(&pinned_shadow_process.0, num_cache_elements)
38        })
39    }
40
41    fn new(shadow_process: &ShadowProcess, num_cache_elements: usize) -> Result<Self, Errno> {
42        let buf = PageBuf::new_with_extra_vmar(num_cache_elements, shadow_process.vmar())
43            .map_err(|e| from_status_like_fdio!(e))?;
44        buf.set_name(&ZIRCON_NAME);
45
46        Ok(Self { buf: Mutex::new(buf) })
47    }
48
49    pub fn with_map_infos<R>(
50        &self,
51        vmar: &zx::Vmar,
52        op: impl FnOnce(Result<&[zx::MapInfo], zx::Status>) -> R,
53    ) -> R {
54        let mut buf = self.buf.lock();
55        match vmar.maps(buf.as_mut()) {
56            Ok((maps, _, avail)) if maps.len() == avail => return op(Ok(maps)),
57            Err(e) => return op(Err(e)),
58
59            // The call succeeded but the buffer wasn't big enough, fall back to a heap allocation.
60            Ok(_) => (),
61        }
62        // No need to hold this lock while we're using the heap instead.
63        drop(buf);
64
65        match vmar.maps_vec() {
66            Ok(maps) => op(Ok(&maps)),
67            Err(e) => op(Err(e)),
68        }
69    }
70}
71
/// The memory pinning shadow process used for zx::MapInfo buffers.
///
/// Uses its own distinct shadow process so that it doesn't interfere with other uses of memory
/// pinning.
// Newtype wrapper so this ShadowProcess gets its own slot in the kernel expando,
// distinct from any other expando-stored ShadowProcess.
pub struct InfoCacheShadowProcess(ShadowProcess);
77
#[cfg(test)]
mod tests {
    use super::*;

    /// A sufficiently large cache must serve map infos directly from its pinned buffer.
    #[fuchsia::test]
    async fn basic_test() {
        let shadow_process = ShadowProcess::new(zx::Name::new_lossy("testing123")).unwrap();
        let cache = MapInfoCache::new(&shadow_process, 100).unwrap();
        let maps_base_addr = cache.with_map_infos(&shadow_process.vmar(), |maps| {
            let maps = maps.unwrap();
            // Consistent with fall_back_to_heap_over_limit; also avoids requiring
            // PartialEq on zx::MapInfo as `assert_ne!(maps, &[])` would.
            assert!(!maps.is_empty());
            maps.as_ptr() as usize
        });
        // The slice handed to the closure should alias the cache's own buffer.
        let cache_base_addr = cache.buf.lock().as_mut().as_ptr() as usize;
        assert_eq!(maps_base_addr, cache_base_addr, "map infos must have been read into cache");
    }

    /// When the VMAR has more mappings than the cache buffer can hold, the query must
    /// fall back to a heap allocation rather than returning a truncated result.
    #[fuchsia::test]
    async fn fall_back_to_heap_over_limit() {
        let shadow_process = ShadowProcess::new(zx::Name::new_lossy("testing123")).unwrap();
        // A single-element cache guarantees overflow below.
        let cache = MapInfoCache::new(&shadow_process, 1).unwrap();

        // Ensure that the test process' root VMAR has more mappings than can fit in the single page
        // of the cache's buffer.
        let number_of_extra_mappings =
            (zx::system_get_page_size() as usize / std::mem::size_of::<zx::MapInfo>()) * 2;
        let vmo_to_map = zx::Vmo::create(4096).unwrap();
        // NOTE(test): these mappings are intentionally left in place for the life of the
        // test process; unmapping them is not needed for the assertion below.
        for _ in 0..number_of_extra_mappings {
            fuchsia_runtime::vmar_root_self()
                .map(0, &vmo_to_map, 0, 4096, zx::VmarFlags::PERM_READ)
                .unwrap();
        }

        let maps_base_addr = cache.with_map_infos(&fuchsia_runtime::vmar_root_self(), |maps| {
            let maps = maps.unwrap();
            assert!(!maps.is_empty());
            maps.as_ptr() as usize
        });

        // The slice must have come from the heap fallback, not the pinned cache buffer.
        let cache_base_addr = cache.buf.lock().as_mut().as_ptr() as usize;
        assert_ne!(maps_base_addr, cache_base_addr, "map infos must not have been read into cache");
    }
}