// refaults_vmo/lib.rs

// Copyright 2025 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
4
use memory_mapped_vmo::MemoryMappedVmo;
use std::sync::atomic::{AtomicU64, Ordering};
use zx::{HandleBased, Rights};
8
/// A page-refault counter stored in a VMO-backed memory mapping, so that the
/// value can be shared across processes via read-only VMO duplicates.
pub struct PageRefaultCounter {
    // Handle to the backing VMO; retained so read-only duplicates can be
    // handed out (see `readonly_vmo`).
    vmo: zx::Vmo,
    // Keeps the memory mapping alive; `count_ptr` points into this region and
    // is only valid while `_storage` exists.
    _storage: MemoryMappedVmo,
    // Points at the single `AtomicU64` at offset 0 of the mapped region.
    count_ptr: *const AtomicU64,
}
14
// SAFETY: both `vmo` and `_storage` are Send, and they are not modified once created (thus, they
// are Sync). `count_ptr` is a pointer, pointing to the memory mapped region managed by
// `MemoryMappedVmo`. It is valid as long as `_storage` is valid, and the pointer is not
// invalidated by moving `PageRefaultCounter` as the memory mapped region stays in the same place.
unsafe impl Send for PageRefaultCounter {}
unsafe impl Sync for PageRefaultCounter {}
21
22impl PageRefaultCounter {
23    /// Creates a new read-write PageRefaultCounter.
24    pub fn new() -> Result<Self, zx::Status> {
25        let vmo = zx::Vmo::create(size_of::<AtomicU64>().try_into().unwrap())?;
26        vmo.set_name(&zx::Name::new_lossy("page_refault_counter"))?;
27
28        // SAFETY: all accesses to [storage] are synchronized (through an Atomic).
29        let mut storage: MemoryMappedVmo = unsafe { MemoryMappedVmo::new_readwrite(&vmo)? };
30        let count_ptr: *mut AtomicU64 =
31            storage.get_object_mut::<AtomicU64>(0).map_err(|_| zx::Status::INVALID_ARGS)?;
32        Ok(PageRefaultCounter { vmo: vmo, _storage: storage, count_ptr })
33    }
34
35    /// Creates a new read-only PageRefaultCounter from the provided VMO. The VMO must have the
36    /// READ, MAP, and GET_PROPERTY rights.
37    pub fn from_vmo_readonly(vmo: zx::Vmo) -> Result<Self, zx::Status> {
38        if vmo.get_size()? < size_of::<AtomicU64>().try_into().unwrap() {
39            return Err(zx::Status::INVALID_ARGS);
40        }
41        // SAFETY: all accesses to [storage] are synchronized (through an Atomic).
42        let storage: MemoryMappedVmo = unsafe { MemoryMappedVmo::new_readonly(&vmo)? };
43        let count_ptr: *const AtomicU64 =
44            storage.get_object::<AtomicU64>(0).map_err(|_| zx::Status::INVALID_ARGS)?;
45        Ok(PageRefaultCounter { vmo: vmo, _storage: storage, count_ptr })
46    }
47
48    pub fn increment(&self, count: u64, order: Ordering) {
49        // SAFETY: `self.count_ptr` is non-null per construction, and valid as long as `_storage`
50        // is valid.
51        unsafe { &*self.count_ptr }.fetch_add(count, order);
52    }
53
54    pub fn read(&self, order: Ordering) -> u64 {
55        // SAFETY: `self.count_ptr` is non-null per construction, and valid as long as `_storage`
56        // is valid.
57        unsafe { &*self.count_ptr }.load(order)
58    }
59
60    /// Returns a read-only handle for the backing VMO.
61    pub fn readonly_vmo(&self) -> Result<zx::Vmo, zx::Status> {
62        self.vmo.duplicate_handle(Rights::BASIC | Rights::READ | Rights::MAP | Rights::GET_PROPERTY)
63    }
64}
65
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_page_refault_counter() {
        // Writer side: owns the read-write mapping.
        let writer = PageRefaultCounter::new().unwrap();

        // Reader side: built from a read-only duplicate of the backing VMO,
        // as a consumer in another process would do.
        let reader =
            PageRefaultCounter::from_vmo_readonly(writer.readonly_vmo().unwrap()).unwrap();

        // Every increment on the writer must be immediately visible through
        // the reader's mapping.
        for expected in [100u64, 200u64] {
            writer.increment(100, Ordering::SeqCst);
            assert_eq!(reader.read(Ordering::SeqCst), expected);
        }
    }
}