// refaults_vmo/lib.rs
1// Copyright 2025 The Fuchsia Authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
5use memory_mapped_vmo::{MemoryMappable, MemoryMappedVmo};
6use std::sync::atomic::{AtomicU64, Ordering};
7use zx::{HandleBased, Rights};
8
/// Newtype over [`AtomicU64`] used as the single object stored in the shared
/// VMO mapping.
///
/// `#[repr(transparent)]` guarantees this has exactly the layout of the inner
/// `AtomicU64`. That matters here because instances live in memory mapped by
/// multiple processes (see the `MemoryMappable` impl below); without an
/// explicit repr, Rust makes no layout guarantee even for a single-field
/// struct.
#[repr(transparent)]
struct SharedAtomicU64(AtomicU64);
10
// SAFETY: `SharedAtomicU64` wraps an `AtomicU64`: every 8-byte bit pattern is
// a valid value, and atomic operations tolerate concurrent access from other
// mappings of the same memory.
// NOTE(review): the exact contract of `MemoryMappable` is not visible from
// this file — confirm it requires no more than the properties above.
unsafe impl MemoryMappable for SharedAtomicU64 {}
12
/// A process-shareable counter backed by a Zircon VMO.
///
/// One side creates the counter read-write ([`PageRefaultCounter::new`]) and
/// hands out read-only duplicates of the backing VMO
/// ([`PageRefaultCounter::readonly_vmo`]); observers reconstruct a read-only
/// view via [`PageRefaultCounter::from_vmo_readonly`].
pub struct PageRefaultCounter {
    // The backing VMO, retained so read-only duplicates can be handed out.
    vmo: zx::Vmo,
    // Keeps the memory mapping alive; `count_ptr` points into this region and
    // must not outlive it.
    _storage: MemoryMappedVmo,
    // Points at offset 0 of the mapped region; valid for as long as
    // `_storage` is.
    count_ptr: *const SharedAtomicU64,
}
18
// SAFETY: both `vmo` and `_storage` are Send, and they are not modified once
// created (thus, they are Sync). `count_ptr` is a pointer into the memory
// mapped region managed by `MemoryMappedVmo`. It is valid as long as
// `_storage` is valid, and it is not invalidated by moving
// `PageRefaultCounter` because the mapped region stays at the same address.
unsafe impl Send for PageRefaultCounter {}
unsafe impl Sync for PageRefaultCounter {}
25
26impl PageRefaultCounter {
27    /// Creates a new read-write PageRefaultCounter.
28    pub fn new() -> Result<Self, zx::Status> {
29        let vmo = zx::Vmo::create(size_of::<AtomicU64>().try_into().unwrap())?;
30        vmo.set_name(&zx::Name::new_lossy("page_refault_counter"))?;
31
32        // SAFETY: all accesses to [storage] are synchronized (through an Atomic).
33        let mut storage: MemoryMappedVmo = unsafe { MemoryMappedVmo::new_readwrite(&vmo)? };
34        let count_ptr: *mut SharedAtomicU64 =
35            storage.get_object_mut::<SharedAtomicU64>(0).map_err(|_| zx::Status::INVALID_ARGS)?;
36        Ok(PageRefaultCounter { vmo: vmo, _storage: storage, count_ptr })
37    }
38
39    /// Creates a new read-only PageRefaultCounter from the provided VMO. The VMO must have the
40    /// READ, MAP, and GET_PROPERTY rights.
41    pub fn from_vmo_readonly(vmo: zx::Vmo) -> Result<Self, zx::Status> {
42        if vmo.get_size()? < size_of::<SharedAtomicU64>().try_into().unwrap() {
43            return Err(zx::Status::INVALID_ARGS);
44        }
45        // SAFETY: all accesses to [storage] are synchronized (through an Atomic).
46        let storage: MemoryMappedVmo = unsafe { MemoryMappedVmo::new_readonly(&vmo)? };
47        let count_ptr: *const SharedAtomicU64 =
48            storage.get_object::<SharedAtomicU64>(0).map_err(|_| zx::Status::INVALID_ARGS)?;
49        Ok(PageRefaultCounter { vmo: vmo, _storage: storage, count_ptr })
50    }
51
52    pub fn increment(&self, count: u64, order: Ordering) {
53        // SAFETY: `self.count_ptr` is non-null per construction, and valid as long as `_storage`
54        // is valid.
55        unsafe { &*self.count_ptr }.0.fetch_add(count, order);
56    }
57
58    pub fn read(&self, order: Ordering) -> u64 {
59        // SAFETY: `self.count_ptr` is non-null per construction, and valid as long as `_storage`
60        // is valid.
61        unsafe { &*self.count_ptr }.0.load(order)
62    }
63
64    /// Returns a read-only handle for the backing VMO.
65    pub fn readonly_vmo(&self) -> Result<zx::Vmo, zx::Status> {
66        self.vmo.duplicate_handle(Rights::BASIC | Rights::READ | Rights::MAP | Rights::GET_PROPERTY)
67    }
68}
69
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_page_refault_counter() {
        // Writer side: owns the read-write mapping.
        let writer = PageRefaultCounter::new().unwrap();

        // Reader side: observes the same memory through a read-only duplicate
        // of the backing VMO.
        let reader =
            PageRefaultCounter::from_vmo_readonly(writer.readonly_vmo().unwrap()).unwrap();

        // Each increment on the writer must be visible to the reader.
        let mut expected = 0;
        for _ in 0..2 {
            writer.increment(100, Ordering::SeqCst);
            expected += 100;
            assert_eq!(reader.read(Ordering::SeqCst), expected);
        }
    }
}