// starnix_core/mm/map_info_cache.rs

use crate::task::CurrentTask;

use memory_pinning::ShadowProcess;
use page_buf::PageBuf;
use starnix_sync::Mutex;
use starnix_uapi::errors::Errno;
use starnix_uapi::from_status_like_fdio;
use std::sync::Arc;
/// A kernel-wide reusable buffer for reading a VMAR's map-info records
/// without allocating on every query.
pub struct MapInfoCache {
    // Page-backed buffer that map-info records are read into. Guarded by a
    // mutex so only one caller fills/borrows it at a time.
    buf: Mutex<PageBuf<zx::MapInfo>>,
}
22
/// Zircon object name applied to the shadow process and the cache buffer so
/// they are identifiable in kernel object diagnostics.
const ZIRCON_NAME: zx::Name = zx::Name::new_lossy("starnix_zx_map_info_cache");
24
impl MapInfoCache {
    /// Returns the kernel-wide cache instance, lazily creating it — and the
    /// shadow process whose VMAR backs its buffer — on first use.
    ///
    /// Zircon status errors are converted to `Errno` via
    /// `from_status_like_fdio!`.
    pub fn get_or_init(current_task: &CurrentTask) -> Result<Arc<Self>, Errno> {
        // Newtype wrapper so this cache's shadow process gets its own expando
        // slot, distinct from any other `ShadowProcess` stored there.
        struct InfoCacheShadowProcess(memory_pinning::ShadowProcess);

        let kernel = current_task.kernel();
        kernel.expando.get_or_try_init(|| {
            // Inner init: create (or reuse) the shadow process first.
            let pinned_shadow_process = kernel.expando.get_or_try_init(|| {
                ShadowProcess::new(ZIRCON_NAME)
                    .map(InfoCacheShadowProcess)
                    .map_err(|e| from_status_like_fdio!(e))
            })?;

            // Capacity is configured in bytes; convert to a count of
            // `zx::MapInfo` elements (integer division truncates).
            let num_cache_elements = kernel.features.cached_zx_map_info_bytes as usize
                / std::mem::size_of::<zx::MapInfo>();
            Self::new(&pinned_shadow_process.0, num_cache_elements)
        })
    }

    /// Allocates the cache buffer, sized for `num_cache_elements` records,
    /// inside `shadow_process`'s VMAR.
    fn new(shadow_process: &ShadowProcess, num_cache_elements: usize) -> Result<Self, Errno> {
        let buf = PageBuf::new_with_extra_vmar(num_cache_elements, shadow_process.vmar())
            .map_err(|e| from_status_like_fdio!(e))?;
        // Name the buffer for diagnostics.
        buf.set_name(&ZIRCON_NAME);

        Ok(Self { buf: Mutex::new(buf) })
    }

    /// Reads `vmar`'s mappings and invokes `op` on the result.
    ///
    /// Fast path: the records are read into the cached buffer and `op`
    /// borrows them directly. If the buffer is too small to hold every record
    /// (`maps.len() != avail`), the lock is released and we fall back to a
    /// heap-allocating read.
    pub fn with_map_infos<R>(
        &self,
        vmar: &zx::Vmar,
        op: impl FnOnce(Result<&[zx::MapInfo], zx::Status>) -> R,
    ) -> R {
        let mut buf = self.buf.lock();
        match vmar.maps(buf.as_mut()) {
            // Everything fit in the cache: hand the caller the cached slice.
            Ok((maps, _, avail)) if maps.len() == avail => return op(Ok(maps)),
            Err(e) => return op(Err(e)),

            // Partial read: cache too small, fall through to the heap path.
            Ok(_) => (),
        }
        // Release the lock before the allocating fallback so other callers
        // can use the cache in the meantime.
        drop(buf);

        // NOTE(review): the mapping set may change between the two reads;
        // presumably `maps_vec` sizes/retries internally — confirm.
        match vmar.maps_vec() {
            Ok(maps) => op(Ok(&maps)),
            Err(e) => op(Err(e)),
        }
    }
}
74
#[cfg(test)]
mod tests {
    use super::*;

    /// A 100-element cache easily holds the shadow process's few mappings, so
    /// the slice handed to the callback must point into the cached buffer.
    #[fuchsia::test]
    async fn basic_test() {
        let shadow_process = ShadowProcess::new(zx::Name::new_lossy("testing123")).unwrap();
        let cache = MapInfoCache::new(&shadow_process, 100).unwrap();
        let maps_base_addr = cache.with_map_infos(&shadow_process.vmar(), |maps| {
            let maps = maps.unwrap();
            // Consistent with fall_back_to_heap_over_limit, and avoids
            // requiring PartialEq on zx::MapInfo.
            assert!(!maps.is_empty());
            maps.as_ptr() as usize
        });
        let cache_base_addr = cache.buf.lock().as_mut().as_ptr() as usize;
        assert_eq!(maps_base_addr, cache_base_addr, "map infos must have been read into cache");
    }

    /// With a 1-element cache and far more mappings than that, the cached
    /// read must come back partial and the heap fallback must be taken.
    #[fuchsia::test]
    async fn fall_back_to_heap_over_limit() {
        let shadow_process = ShadowProcess::new(zx::Name::new_lossy("testing123")).unwrap();
        let cache = MapInfoCache::new(&shadow_process, 1).unwrap();

        // Create enough extra mappings that the root VMAR certainly exceeds
        // the 1-element cache. The mappings are intentionally leaked; the
        // test process exits shortly after.
        let number_of_extra_mappings =
            (zx::system_get_page_size() as usize / std::mem::size_of::<zx::MapInfo>()) * 2;
        // NOTE(review): VMO size / map length are hard-coded to 4096, which
        // assumes a 4KiB page size — confirm on larger-page targets.
        let vmo_to_map = zx::Vmo::create(4096).unwrap();
        for _ in 0..number_of_extra_mappings {
            fuchsia_runtime::vmar_root_self()
                .map(0, &vmo_to_map, 0, 4096, zx::VmarFlags::PERM_READ)
                .unwrap();
        }

        let maps_base_addr = cache.with_map_infos(&fuchsia_runtime::vmar_root_self(), |maps| {
            let maps = maps.unwrap();
            assert!(!maps.is_empty());
            maps.as_ptr() as usize
        });

        let cache_base_addr = cache.buf.lock().as_mut().as_ptr() as usize;
        assert_ne!(maps_base_addr, cache_base_addr, "map infos must not have been read into cache");
    }
}