fxfs_platform/fuchsia/
pager.rs

// Copyright 2021 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

use crate::fuchsia::errors::map_to_status;
use crate::fuchsia::node::FxNode;
use crate::fuchsia::profile::Recorder;
use anyhow::Error;
use bitflags::bitflags;
use fuchsia_async::epoch::{Epoch, EpochGuard};
use fuchsia_async::{self as fasync};
use fuchsia_sync::{Mutex, MutexGuard};
use fxfs::future_with_guard::FutureWithGuard;
use fxfs::log::*;
use fxfs::range::RangeExt;
use fxfs::round::{round_down, round_up};
use std::future::Future;
use std::marker::PhantomData;
use std::mem::MaybeUninit;
use std::ops::{Deref, Range};
use std::sync::atomic::{AtomicU64, Ordering};
use std::sync::{Arc, Weak};
use storage_device::buffer;
use vfs::execution_scope::ExecutionScope;
use zx::sys::zx_page_request_command_t::{ZX_PAGER_VMO_DIRTY, ZX_PAGER_VMO_READ};
use zx::{PacketContents, PagerPacket, SignalPacket};

pub static STRONG_FILE_REFS: AtomicU64 = AtomicU64::new(0);

fn watch_for_zero_children(file: &impl PagerBacked) -> Result<(), zx::Status> {
    file.vmo().wait_async(
        file.pager().executor.port(),
        file.pager_packet_receiver_registration().key(),
        zx::Signals::VMO_ZERO_CHILDREN,
        zx::WaitAsyncOpts::empty(),
    )
}

pub type PagerPacketReceiverRegistration<T> = fasync::ReceiverRegistration<PagerPacketReceiver<T>>;

/// A `fuchsia_async::PacketReceiver` that handles pager packets and the `VMO_ZERO_CHILDREN` signal.
pub struct PagerPacketReceiver<T> {
    file: Mutex<FileHolder<T>>,
}

/// A returnable lock held on the receiver.
pub struct PagerPacketReceiverLock<'a, T> {
    _guard: MutexGuard<'a, FileHolder<T>>,
    strong: bool,
}

impl<T> PagerPacketReceiverLock<'_, T> {
    /// Returns true if the receiver was installed as a strong reference.
    pub fn is_strong(&self) -> bool {
        self.strong
    }
}

impl<T: PagerBacked> PagerPacketReceiver<T> {
    /// Drops the strong reference to the file that might be held if
    /// `Pager::watch_for_zero_children` was called. This should only be used when forcibly dropping
    /// the file object. Calls `on_zero_children` if the strong reference was held.
    pub fn stop_watching_for_zero_children(&self) {
        let mut file = self.file.lock();
        if let FileHolder::Strong(strong) = &*file {
            let weak = FileHolder::Weak(Arc::downgrade(&strong));
            let FileHolder::Strong(strong) = std::mem::replace(&mut *file, weak) else {
                unreachable!();
            };
            STRONG_FILE_REFS.fetch_sub(1, Ordering::Relaxed);
            strong.on_zero_children();
        }
    }

    /// Sets the current receiver and returns the lock guard so that it can be held after the value
    /// is set. Currently this allows synchronizing open count adjustments.
    pub fn set_receiver(&self, new_receiver: &Arc<T>) -> PagerPacketReceiverLock<'_, T> {
        let mut receiver_lock = self.file.lock();
        let strong = match &mut *receiver_lock {
            FileHolder::Strong(arc) => {
                *arc = new_receiver.clone();
                true
            }
            FileHolder::Weak(arc) => {
                *arc = Arc::downgrade(new_receiver);
                false
            }
        };
        PagerPacketReceiverLock { _guard: receiver_lock, strong }
    }

    fn receive_pager_packet(&self, contents: PagerPacket) {
        let command = contents.command();
        if command != ZX_PAGER_VMO_READ && command != ZX_PAGER_VMO_DIRTY {
            return;
        }

        let (file, epoch_guard) = {
            let file_lock = self.file.lock();
            let file = match &*file_lock {
                FileHolder::Strong(file) => file.clone(),
                FileHolder::Weak(file) => {
                    if let Some(file) = file.upgrade() {
                        file
                    } else {
                        error!("Received a page request for a file that is closed {:?}", contents);
                        return;
                    }
                }
            };

            // Whenever a file is flushed, we must make sure existing page requests for that file
            // are completed, to eliminate the possibility of supplying stale data for it.  We
            // solve this by using a barrier when we flush to wait for outstanding page requests
            // to finish.  Technically, we only need to wait for page requests for the specific
            // file being flushed, but we should first see whether doing so is needed for
            // performance reasons.
            let epoch_guard = match command {
                // Don't take refs for mark_dirty: it can block on flushes, which block on the
                // epoch, creating a deadlock. The call for awaiting epochs is `page_in_barrier`,
                // which correctly implies that it should only wait on page-ins.
                ZX_PAGER_VMO_READ => Some(Epoch::global().guard()),
                _ => None,
            };
            (file, epoch_guard)
        };

        // The scope guard needs to be held and outlive the file Arc and the clones of it.
        let Some(_scope_guard) = file.pager().scope.try_active_guard() else {
            // If an active guard can't be acquired then the filesystem must be shutting down. Fail
            // the page request to avoid leaving the client hanging.
            file.pager().report_failure(file.vmo(), contents.range(), zx::Status::BAD_STATE);
            return;
        };
        match command {
            ZX_PAGER_VMO_READ => {
                file.clone().page_in(PageInRange::new(contents.range(), file, epoch_guard.unwrap()))
            }
            ZX_PAGER_VMO_DIRTY => {
                file.clone().mark_dirty(MarkDirtyRange::new(contents.range(), file))
            }
            _ => unreachable!("Unhandled commands are filtered above"),
        }
    }

    fn receive_signal_packet(&self, signals: SignalPacket) {
        assert!(signals.observed().contains(zx::Signals::VMO_ZERO_CHILDREN));

        // Check to see if there really are no children (which is necessary to avoid races) and, if
        // so, replace the strong reference with a weak one and call on_zero_children on the node.
        // If the file does have children, this asks the kernel to send us the ON_ZERO_CHILDREN
        // notification for the file.
        let mut file = self.file.lock();
        if let FileHolder::Strong(strong) = &*file {
            // If the last strong reference to the Arc is dropped here, then FxVolume's shutdown
            // won't wait for the inner node object to be dropped. Taking an active guard around
            // dropping the strong reference forces the FxVolume to wait for the file to be dropped.
            // If the scope has begun shutdown then we can't take an active guard, so instead we do
            // nothing here and the strong reference in the FileHolder will be removed by calling
            // `FxNode.terminate()` as part of `NodeCache.terminate()` in the FxVolume termination
            // thread.
            let Some(_guard) = strong.pager().scope.try_active_guard() else {
                info!("Ignoring zero-children notification due to shutting down");
                return;
            };
            match strong.vmo().info() {
                Ok(info) => {
                    if info.num_children == 0 {
                        let weak = FileHolder::Weak(Arc::downgrade(&strong));
                        let FileHolder::Strong(strong) = std::mem::replace(&mut *file, weak) else {
                            unreachable!();
                        };
                        STRONG_FILE_REFS.fetch_sub(1, Ordering::Relaxed);
                        strong.on_zero_children();
                    } else {
                        // There's not much we can do here if this fails, so we panic.
                        watch_for_zero_children(strong.as_ref()).unwrap();
                    }
                }
                Err(e) => error!(error:? = e; "Vmo::info failed"),
            }
        }
    }
}

impl<T: PagerBacked> fasync::PacketReceiver for PagerPacketReceiver<T> {
    fn receive_packet(&self, packet: zx::Packet) {
        match packet.contents() {
            PacketContents::Pager(contents) => {
                self.receive_pager_packet(contents);
            }
            PacketContents::SignalOne(signals) => {
                self.receive_signal_packet(signals);
            }
            _ => unreachable!(), // We don't expect any other kinds of packets.
        }
    }
}

pub struct Pager {
    pager: zx::Pager,
    scope: ExecutionScope,
    executor: fasync::EHandle,
    recorder: Mutex<Option<Box<dyn Recorder>>>,
}

// FileHolder is used to retain either a strong or a weak reference to a file.  If there are any
// child VMOs that have been shared, then we will have a strong reference which is required to keep
// the file alive.  When we detect that there are no more children, we can downgrade to a weak
// reference which will allow the file to be cleaned up if there are no other uses.
enum FileHolder<T> {
    Strong(Arc<T>),
    Weak(Weak<T>),
}

/// Pager handles page requests. It is a per-volume object.
impl Pager {
    /// Creates a new pager.
    pub fn new(scope: ExecutionScope) -> Result<Self, Error> {
        Ok(Pager {
            pager: zx::Pager::create(zx::PagerOptions::empty())?,
            scope,
            executor: fasync::EHandle::local(),
            recorder: Mutex::new(None),
        })
    }

    /// Spawns a short term task for the pager that includes a guard that will prevent termination.
    fn spawn(&self, task: impl Future<Output = ()> + Send + 'static) {
        if let Some(guard) = self.scope.try_active_guard() {
            self.executor.spawn_detached(FutureWithGuard::new(guard, task));
        }
    }

    /// Set the current profile recorder, or set to None to not record.
    pub fn set_recorder(&self, recorder: Option<Box<dyn Recorder>>) {
        // Drop the old one outside of the lock.
        let _old = std::mem::replace(&mut (*self.recorder.lock()), recorder);
    }

    /// Borrow the profile recorder. Used to record file opens.
    pub fn recorder(&self) -> MutexGuard<'_, Option<Box<dyn Recorder>>> {
        self.recorder.lock()
    }

    /// Record a range into a profile if one is being recorded.
    pub fn record_page_in<P: PagerBacked>(&self, node: Arc<P>, range: Range<u64>) {
        let mut recorder_holder = self.recorder.lock();
        if let Some(recorder) = &mut (*recorder_holder) {
            // If the message fails to send, so will all the rest.
            if let Err(_) = recorder.record(node, range.start) {
                *recorder_holder = None;
            }
        }
    }

    /// Creates a new VMO to be used with the pager.
    pub fn create_vmo<T: PagerBacked>(
        &self,
        file: Weak<T>,
        initial_size: u64,
        vmo_options: zx::VmoOptions,
    ) -> Result<(zx::Vmo, PagerPacketReceiverRegistration<T>), Error> {
        let registration = self
            .executor
            .register_receiver(PagerPacketReceiver { file: Mutex::new(FileHolder::Weak(file)) });
        Ok((
            self.pager.create_vmo(
                vmo_options,
                self.executor.port(),
                registration.key(),
                initial_size,
            )?,
            registration,
        ))
    }

    /// Starts watching for the `VMO_ZERO_CHILDREN` signal on `file`'s vmo. Returns false if the
    /// signal is already being watched for. When the pager receives the `VMO_ZERO_CHILDREN` signal
    /// [`PagerBacked::on_zero_children`] will be called.
    pub fn watch_for_zero_children(&self, file: &impl PagerBacked) -> Result<bool, Error> {
        let mut file = file.pager_packet_receiver_registration().file.lock();

        match &*file {
            FileHolder::Weak(weak) => {
                // Should never fail because watch_for_zero_children should be called from `file`.
                let strong = weak.upgrade().unwrap();

                watch_for_zero_children(strong.as_ref())?;

                STRONG_FILE_REFS.fetch_add(1, Ordering::Relaxed);
                *file = FileHolder::Strong(strong);
                Ok(true)
            }
            FileHolder::Strong(_) => Ok(false),
        }
    }

    /// Supplies pages in response to a `ZX_PAGER_VMO_READ` page request. See
    /// `zx_pager_supply_pages` for more information.
    fn supply_pages(
        &self,
        vmo: &zx::Vmo,
        range: Range<u64>,
        transfer_vmo: &zx::Vmo,
        transfer_offset: u64,
    ) {
        if let Err(e) = self.pager.supply_pages(vmo, range, transfer_vmo, transfer_offset) {
            error!(error:? = e; "supply_pages failed");
        }
    }

    /// Notifies the kernel that a page request for the given `range` has failed. Sent in response
    /// to a `ZX_PAGER_VMO_READ` or `ZX_PAGER_VMO_DIRTY` page request. See `ZX_PAGER_OP_FAIL` for
    /// more information.
    fn report_failure(&self, vmo: &zx::Vmo, range: Range<u64>, status: zx::Status) {
        let pager_status = match status {
            zx::Status::IO_DATA_INTEGRITY => zx::Status::IO_DATA_INTEGRITY,
            zx::Status::NO_SPACE => zx::Status::NO_SPACE,
            zx::Status::FILE_BIG => zx::Status::BUFFER_TOO_SMALL,
            zx::Status::IO
            | zx::Status::IO_DATA_LOSS
            | zx::Status::IO_INVALID
            | zx::Status::IO_MISSED_DEADLINE
            | zx::Status::IO_NOT_PRESENT
            | zx::Status::IO_OVERRUN
            | zx::Status::IO_REFUSED
            | zx::Status::PEER_CLOSED => zx::Status::IO,
            _ => zx::Status::BAD_STATE,
        };
        if let Err(e) = self.pager.op_range(zx::PagerOp::Fail(pager_status), vmo, range) {
            error!(error:? = e; "op_range failed");
        }
    }

    /// Allows the kernel to dirty the `range` of pages. Sent in response to a `ZX_PAGER_VMO_DIRTY`
    /// page request. See `ZX_PAGER_OP_DIRTY` for more information.
    fn dirty_pages(&self, vmo: &zx::Vmo, range: Range<u64>) {
        if let Err(e) = self.pager.op_range(zx::PagerOp::Dirty, vmo, range) {
            // It is possible for `ZX_ERR_NOT_FOUND` to be returned on a clean page that has been
            // evicted. In this case, the kernel will retry if necessary. Unfortunately, this will
            // cause a mismatch in the accounting between Fxfs and the kernel, but there is nothing
            // we can do about that right now. See https://fxbug.dev/42086069 for more information.
            if e != zx::Status::NOT_FOUND {
                error!(error:? = e; "dirty_pages failed");
            }
        }
    }

    /// Notifies the kernel that the filesystem has started cleaning the `range` of pages. See
    /// `ZX_PAGER_OP_WRITEBACK_BEGIN` for more information.
    pub fn writeback_begin(
        &self,
        vmo: &zx::Vmo,
        range: Range<u64>,
        options: zx::PagerWritebackBeginOptions,
    ) {
        if let Err(e) = self.pager.op_range(zx::PagerOp::WritebackBegin(options), vmo, range) {
            error!(error:? = e; "writeback_begin failed");
        }
    }

    /// Notifies the kernel that the filesystem has finished cleaning the `range` of pages. See
    /// `ZX_PAGER_OP_WRITEBACK_END` for more information.
    pub fn writeback_end(&self, vmo: &zx::Vmo, range: Range<u64>) {
        if let Err(e) = self.pager.op_range(zx::PagerOp::WritebackEnd, vmo, range) {
            error!(error:? = e; "writeback_end failed");
        }
    }

    /// Queries the `vmo` for ranges that are dirty within `range`. Returns `(num_returned,
    /// num_remaining)` where `num_returned` is the number of objects populated in `buffer` and
    /// `num_remaining` is the number of dirty ranges remaining in `range` that could not fit in
    /// `buffer`. See `zx_pager_query_dirty_ranges` for more information.
    pub fn query_dirty_ranges(
        &self,
        vmo: &zx::Vmo,
        range: Range<u64>,
        buffer: &mut [VmoDirtyRange],
    ) -> Result<(usize, usize), zx::Status> {
        let mut actual = 0;
        let mut avail = 0;
        let status = unsafe {
            // TODO(https://fxbug.dev/42142550) Move to src/lib/zircon/rust/src/pager.rs once
            // query_dirty_ranges is part of the stable vDSO.
            zx::sys::zx_pager_query_dirty_ranges(
                self.pager.raw_handle(),
                vmo.raw_handle(),
                range.start,
                range.end - range.start,
                buffer.as_mut_ptr() as *mut u8,
                std::mem::size_of_val(buffer),
                &mut actual as *mut usize,
                &mut avail as *mut usize,
            )
        };
        zx::ok(status).map(|_| (actual, avail - actual))
    }

    /// Queries the `vmo` for any pager related statistics. If
    /// `PagerVmoStatsOptions::RESET_VMO_STATS` is passed then the stats will also be reset. See
    /// `zx_pager_query_vmo_stats` for more information.
    pub fn query_vmo_stats(
        &self,
        vmo: &zx::Vmo,
        options: PagerVmoStatsOptions,
    ) -> Result<PagerVmoStats, zx::Status> {
        #[repr(C)]
        #[derive(Default)]
        struct zx_pager_vmo_stats {
            pub modified: u32,
        }
        const ZX_PAGER_VMO_STATS_MODIFIED: u32 = 1;
        let mut vmo_stats = MaybeUninit::<zx_pager_vmo_stats>::uninit();
        let status = unsafe {
            // TODO(https://fxbug.dev/42142550) Move to src/lib/zircon/rust/src/pager.rs once
            // query_vmo_stats is part of the stable vDSO.
            zx::sys::zx_pager_query_vmo_stats(
                self.pager.raw_handle(),
                vmo.raw_handle(),
                options.bits(),
                vmo_stats.as_mut_ptr() as *mut u8,
                std::mem::size_of::<zx_pager_vmo_stats>(),
            )
        };
        zx::ok(status)?;
        let vmo_stats = unsafe { vmo_stats.assume_init() };
        Ok(PagerVmoStats { was_vmo_modified: vmo_stats.modified == ZX_PAGER_VMO_STATS_MODIFIED })
    }

    pub async fn page_in_barrier() {
        Epoch::global().barrier().await;
    }
}
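
// Illustrative sketch (not part of the original source): the typical setup flow for a
// pager-backed node. A per-volume `Pager` is created against the volume's `ExecutionScope`, and
// each node then gets a pager-backed VMO whose packets are routed back to the node through the
// receiver registration. The node type `T` and the use of the default page size as the initial
// size are assumptions made for this example.
#[allow(dead_code)]
fn example_pager_setup<T: PagerBacked>(
    scope: ExecutionScope,
    file: Weak<T>,
) -> Result<(Pager, zx::Vmo, PagerPacketReceiverRegistration<T>), Error> {
    // One pager per volume.
    let pager = Pager::new(scope)?;
    // One pager-backed VMO per node; pager packets for it will be delivered to `file`.
    let (vmo, registration) = pager.create_vmo(file, page_size(), zx::VmoOptions::empty())?;
    Ok((pager, vmo, registration))
}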

/// This is a trait for objects (files/blobs) that expose a pager backed VMO.
pub trait PagerBacked: FxNode + Sync + Send + Sized + 'static {
    /// The pager backing this VMO.
    fn pager(&self) -> &Pager;

    /// The receiver registration returned from [`Pager::create_vmo`].
    fn pager_packet_receiver_registration(&self) -> &PagerPacketReceiverRegistration<Self>;

    /// The pager backed VMO that this object is handling packets for. The VMO must be created with
    /// [`Pager::create_vmo`].
    fn vmo(&self) -> &zx::Vmo;

    /// Called by the pager when a `ZX_PAGER_VMO_READ` packet is received for the VMO. The
    /// implementation must respond by calling either `PageInRange::supply_pages` or
    /// `PageInRange::report_failure`.
    fn page_in(self: Arc<Self>, range: PageInRange<Self>);

    /// Called by the pager when a `ZX_PAGER_VMO_DIRTY` packet is received for the VMO. The
    /// implementation must respond by calling either `MarkDirtyRange::dirty_pages` or
    /// `MarkDirtyRange::report_failure`.
    fn mark_dirty(self: Arc<Self>, range: MarkDirtyRange<Self>);

    /// Called by the pager to indicate there are no more VMO children.
    fn on_zero_children(self: Arc<Self>);

461    fn byte_size(&self) -> u64;
462
463    /// Reads one or more blocks into a buffer and returns it. This method is called by
464    /// `default_page_in` and `aligned_byte_range` will always be aligned to the `read_ahead_size`
465    /// past to `default_page_in` unless that would extend beyond `self.byte_size()`, in which case,
466    /// `aligned_byte_range` will end at `self.byte_size()`'s next page multiple. The returned
467    /// buffer must be at least as large as the requested range. Only the requested range will be
468    /// supplied to the pager.
469    fn aligned_read(
470        &self,
471        aligned_byte_range: std::ops::Range<u64>,
472    ) -> impl Future<Output = Result<buffer::Buffer<'_>, Error>> + Send;
473}
474
/// A generic page_in implementation that supplies pages using block-aligned reads.
pub fn default_page_in<P: PagerBacked>(
    this: Arc<P>,
    pager_range: PageInRange<P>,
    read_ahead_size: u64,
) {
    fxfs_trace::duration!(
        "start-page-in",
        "offset" => pager_range.start(),
        "len" => pager_range.len()
    );

    const ZERO_VMO_SIZE: u64 = 1_048_576;
    static ZERO_VMO: std::sync::LazyLock<zx::Vmo> =
        std::sync::LazyLock::new(|| zx::Vmo::create(ZERO_VMO_SIZE).unwrap());

    assert!(pager_range.end() < i64::MAX as u64);

    // Two important subtleties to consider in this space:
    //
    // `byte_size` is the official size of the object. VMOs are page-aligned, so
    // `page_aligned_size` is the "official" page length of the object. This may be smaller than
    // Vmo::get_size because these two things are not updated atomically. The reverse is not true:
    // we do not currently ever shrink a VMO's size. We also do not update byte_size
    // (self.handle.get_size()) if an independent handle is used to grow a file. This means the
    // VMO's size should always be at least as large as `byte_size`.
    //
    // It is valid to supply more pages than asked for, but supplying pages outside of the VMO
    // range will trigger OUT_OF_RANGE errors and the call will fail without supplying anything.
    // We must supply the requested range under all circumstances to unblock any page misses, but
    // we should take care to never supply additional pages beyond `page_aligned_size`, as
    // otherwise we might address a range outside of the VMO and fail to supply anything at all.

    let page_aligned_size = round_up(this.byte_size(), page_size()).unwrap();

    // Zero-pad the tail if the requested range exceeds the size of the thing we're reading. This
    // can happen when we truncate and there are outstanding pager requests that the kernel was not
    // able to cancel in time.
    let (read_range, zero_range) = pager_range.split(page_aligned_size);
    if let Some(zero_range) = zero_range {
        for range in zero_range.chunks(ZERO_VMO_SIZE) {
            range.supply_pages(&ZERO_VMO, 0);
        }
    }

    if let Some(read_range) = read_range {
        let expanded_range_for_readahead = round_down(read_range.start(), read_ahead_size)
            ..std::cmp::min(
                round_up(read_range.end(), read_ahead_size).unwrap(),
                page_aligned_size,
            );
        let read_range = read_range.expand(expanded_range_for_readahead);
        for range in read_range.chunks(read_ahead_size) {
            // Record the page-in before spawning the task to handle it. This is necessary so that
            // we don't miss this page-in when replaying and recording a new profile.  The replay
            // is considered finished once we've responded to the page request, so if we spawned
            // the page request before recording the page-in, it would be possible (albeit
            // unlikely) for the profiler to think the replay had finished without knowing about
            // the page request, and the next recording would then be missing it.  With the order
            // swapped, the `test_profile` test would have a rare flake.
            this.pager().record_page_in(this.clone(), range.range.clone());

            this.pager().spawn(page_in_chunk(this.clone(), range));
        }
    }
}
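
// Illustrative sketch (not part of the original source): the readahead expansion arithmetic used
// in `default_page_in` above, pulled out as a standalone helper. For example, with
// read_ahead_size = 0x20000 and page_aligned_size = 0x30000, a request for 0x21000..0x22000
// expands to 0x20000..0x30000.
#[allow(dead_code)]
fn example_expand_for_readahead(
    requested: Range<u64>,
    read_ahead_size: u64,
    page_aligned_size: u64,
) -> Range<u64> {
    // Round the start down and the end up to readahead boundaries, but never extend past the
    // page-aligned size of the file.
    round_down(requested.start, read_ahead_size)
        ..std::cmp::min(round_up(requested.end, read_ahead_size).unwrap(), page_aligned_size)
}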

#[fxfs_trace::trace("offset" => read_range.start(), "len" => read_range.len())]
async fn page_in_chunk<P: PagerBacked>(this: Arc<P>, read_range: PageInRange<P>) {
    let buffer = match this.aligned_read(read_range.range()).await {
        Ok(v) => v,
        Err(error) => {
            error!(range:? = read_range.range(), error:?; "Failed to load range");
            read_range.report_failure(map_to_status(error));
            return;
        }
    };
    assert!(
        buffer.len() as u64 >= read_range.len(),
        "A buffer smaller than requested was returned. requested: {}, returned: {}",
        read_range.len(),
        buffer.len()
    );
    read_range.supply_pages(buffer.allocator().buffer_source().vmo(), buffer.range().start as u64);
}

/// Represents a dirty range of page aligned bytes within a pager backed VMO.
#[repr(C)]
#[derive(Debug, Copy, Clone, Default, PartialEq, Eq)]
pub struct VmoDirtyRange {
    offset: u64,
    length: u64,
    options: u64,
}

impl VmoDirtyRange {
    /// The page aligned byte range.
    pub fn range(&self) -> Range<u64> {
        self.offset..(self.offset + self.length)
    }

    /// Returns true if all of the bytes in the range are 0.
    pub fn is_zero_range(&self) -> bool {
        self.options & zx::sys::ZX_VMO_DIRTY_RANGE_IS_ZERO != 0
    }
}
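
// Illustrative sketch (not part of the original source): draining every dirty range in a region
// using a fixed-size buffer, based on the `(num_returned, num_remaining)` contract of
// `Pager::query_dirty_ranges`. The buffer size of 16 is an arbitrary example value.
#[allow(dead_code)]
fn example_collect_dirty_ranges(
    pager: &Pager,
    vmo: &zx::Vmo,
    mut range: Range<u64>,
) -> Result<Vec<VmoDirtyRange>, zx::Status> {
    let mut results = Vec::new();
    let mut buffer = [VmoDirtyRange::default(); 16];
    loop {
        let (returned, remaining) = pager.query_dirty_ranges(vmo, range.clone(), &mut buffer)?;
        results.extend_from_slice(&buffer[..returned]);
        if remaining == 0 || returned == 0 {
            break;
        }
        // Resume the query just past the last range that was returned.
        range.start = buffer[returned - 1].range().end;
    }
    Ok(results)
}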

bitflags! {
    /// Options for `Pager::query_vmo_stats`.
    #[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
    #[repr(transparent)]
    pub struct PagerVmoStatsOptions: u32 {
        /// Resets the stats at the end of the `Pager::query_vmo_stats` call.
        const RESET_VMO_STATS = 1;
    }
}

/// Pager related statistic for a VMO.
#[derive(Debug)]
pub struct PagerVmoStats {
    was_vmo_modified: bool,
}

impl PagerVmoStats {
    /// Returns true if the VMO was modified since the last time the VMO stats were reset.
    pub fn was_vmo_modified(&self) -> bool {
        self.was_vmo_modified
    }
}
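
// Illustrative sketch (not part of the original source): using `query_vmo_stats` with
// `RESET_VMO_STATS` to atomically observe-and-clear the modified flag, e.g. to detect writes
// that raced with a flush. The surrounding flush logic is assumed.
#[allow(dead_code)]
fn example_was_modified_since_last_check(
    pager: &Pager,
    vmo: &zx::Vmo,
) -> Result<bool, zx::Status> {
    // Resetting as part of the query means the next call only reports new writes.
    let stats = pager.query_vmo_stats(vmo, PagerVmoStatsOptions::RESET_VMO_STATS)?;
    Ok(stats.was_vmo_modified())
}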

#[inline]
fn page_size() -> u64 {
    zx::system_get_page_size().into()
}

/// A trait for specializing `PagerRange` for different request types.
pub trait PagerRequestType {
    /// Returns the name of the request type for logging purposes.
    fn request_type_name() -> &'static str;
}

/// A request generated from a ZX_PAGER_VMO_READ packet.
pub struct PageInRequest;

impl PagerRequestType for PageInRequest {
    fn request_type_name() -> &'static str {
        "PageInRequest"
    }
}

/// The requested range from a ZX_PAGER_VMO_READ packet. This object must not be dropped without
/// calling either `supply_pages` or `report_failure`.
pub type PageInRange<T> = PagerRange<T, PageInRequest>;

impl<T: PagerBacked> PageInRange<T> {
    /// Constructs a new `PageInRange<T>`. `range` must be page aligned.
    pub fn new(range: Range<u64>, file: Arc<T>, epoch_guard: EpochGuard<'static>) -> Self {
        debug_assert!(
            range.start % page_size() == 0 && range.end % page_size() == 0,
            "{:?} is not page aligned",
            range
        );
        Self {
            range,
            inner: Some(PagerRangeInner { file, _epoch_guard: Some(epoch_guard) }),
            _request_type: PhantomData,
        }
    }

    /// Supplies pages to the kernel for this range. See `zx_pager_supply_pages` for more
    /// information.
    pub fn supply_pages(mut self, transfer_vmo: &zx::Vmo, transfer_offset: u64) {
        let inner = self.inner.take().unwrap();
        inner.file.pager().supply_pages(
            inner.file.vmo(),
            self.range.clone(),
            transfer_vmo,
            transfer_offset,
        );
    }
}

/// A request generated from a ZX_PAGER_VMO_DIRTY packet.
#[derive(Debug)]
pub struct MarkDirtyRequest;

impl PagerRequestType for MarkDirtyRequest {
    fn request_type_name() -> &'static str {
        "MarkDirtyRequest"
    }
}

/// The requested range from a ZX_PAGER_VMO_DIRTY packet. This object must not be dropped without
/// calling either `dirty_pages` or `report_failure`.
pub type MarkDirtyRange<T> = PagerRange<T, MarkDirtyRequest>;

impl<T: PagerBacked> MarkDirtyRange<T> {
    /// Constructs a new `MarkDirtyRange<T>`. `range` must be page aligned.
    pub fn new(range: Range<u64>, file: Arc<T>) -> Self {
        debug_assert!(
            range.start % page_size() == 0 && range.end % page_size() == 0,
            "{:?} is not page aligned",
            range
        );
        Self {
            range,
            inner: Some(PagerRangeInner { file, _epoch_guard: None }),
            _request_type: PhantomData,
        }
    }

    /// Allows the kernel to dirty this range of pages. See `ZX_PAGER_OP_DIRTY` for more
    /// information.
    pub fn dirty_pages(mut self) {
        let inner = self.inner.take().unwrap();
        inner.file.pager().dirty_pages(inner.file.vmo(), self.range.clone());
    }
}
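
// Illustrative sketch (not part of the original source): the response contract for
// `PagerBacked::mark_dirty`, mirroring `page_in`. A real implementation would first reserve
// space for the eventual writeback; the `have_space` flag stands in for that decision here.
#[allow(dead_code)]
fn example_mark_dirty_response<T: PagerBacked>(range: MarkDirtyRange<T>, have_space: bool) {
    if have_space {
        // Let the kernel transition the pages to dirty so the client's write can proceed.
        range.dirty_pages();
    } else {
        // Without reserved space, the request must fail rather than leave the client hanging.
        range.report_failure(zx::Status::NO_SPACE);
    }
}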

#[derive(Clone)]
struct PagerRangeInner<T: std::clone::Clone + Deref<Target: PagerBacked>> {
    // All generic type parameters must be cloneable for the derived `Clone` impl, so we
    // parameterize over the `Arc` rather than the inner type.
    file: T,

    /// Holds a reference to the current Epoch, so that in-flight read requests can be tracked. This
    /// should be None for MarkDirty requests.
    _epoch_guard: Option<EpochGuard<'static>>,
}

/// The requested range from a pager packet. This object ensures that all pager requests receive a
/// response.
pub struct PagerRange<T: PagerBacked, U: PagerRequestType> {
    range: Range<u64>,

    /// Contains the file and the ref guard. If this is None, then the request is complete.
    inner: Option<PagerRangeInner<Arc<T>>>,

    _request_type: PhantomData<U>,
}

impl<T: PagerBacked, U: PagerRequestType> PagerRange<T, U> {
    /// Splits the underlying range allowing for different parts of the range to be handled and
    /// responded to independently. See `RangeExt::split` for how splitting a range works.
    /// `split_point` must be page aligned.
    pub fn split(mut self, split_point: u64) -> (Option<Self>, Option<Self>) {
        let inner = self.inner.take().unwrap();
        let (left, right) = self.range.clone().split(split_point);
        let right = right.map(|range| Self {
            range,
            inner: Some(inner.clone()),
            _request_type: PhantomData,
        });
        let left = left.map(|range| Self { range, inner: Some(inner), _request_type: PhantomData });
        (left, right)
    }

    /// Increases the size of the range that will be responded to. Panics if the current range is
    /// not a subset of `new_range`. `new_range` must be page aligned.
    pub fn expand(mut self, new_range: Range<u64>) -> Self {
        assert!(
            self.range.start >= new_range.start && self.range.end <= new_range.end,
            "{:?} is not a subset of {:?}",
            self.range,
            new_range
        );
        debug_assert!(
            new_range.start % page_size() == 0 && new_range.end % page_size() == 0,
            "{:?} is not page aligned",
            new_range
        );
        self.range = new_range;
        self
    }

    /// Returns an iterator that splits the range into ranges of `chunk_size`. If the length of the
    /// range is not a multiple of `chunk_size` then the last chunk won't be of length `chunk_size`.
    /// The returned iterator will panic if it's dropped without being fully consumed. `chunk_size`
    /// must be a multiple of the page size.
    pub fn chunks(mut self, chunk_size: u64) -> PagerRangeChunksIter<T, U> {
        debug_assert!(
            chunk_size % page_size() == 0,
            "{} is not a multiple of the page size",
            chunk_size
        );
        PagerRangeChunksIter {
            start: self.range.start,
            end: self.range.end,
            chunk_size,
            inner: self.inner.take(),
            _request_type: PhantomData,
        }
    }

    #[inline]
    pub fn start(&self) -> u64 {
        self.range.start
    }

    #[inline]
    pub fn end(&self) -> u64 {
        self.range.end
    }

    #[inline]
    pub fn len(&self) -> u64 {
        self.range.end - self.range.start
    }

    #[inline]
    pub fn range(&self) -> Range<u64> {
        self.range.clone()
    }

    /// Notifies the kernel that the page request for this range has failed. See `ZX_PAGER_OP_FAIL`
    /// for more information.
    pub fn report_failure(mut self, status: zx::Status) {
        let inner = self.inner.take().unwrap();
        inner.file.pager().report_failure(inner.file.vmo(), self.range.clone(), status);
    }

    /// Test only method that consumes the PagerRange without having to send a response.
    #[cfg(test)]
    fn consume(mut self) {
        self.inner.take().unwrap();
    }
}
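
// Illustrative sketch (not part of the original source): using `split` to answer the portion of
// a page-in request that falls past the end of the file with zeroes, the same shape as the zero
// tail handling in `default_page_in` above. Failing the in-bounds half with NOT_SUPPORTED is a
// placeholder for a real disk read; `page_aligned_size` must be page aligned.
#[allow(dead_code)]
fn example_split_and_zero_tail<T: PagerBacked>(range: PageInRange<T>, page_aligned_size: u64) {
    let (read_range, zero_range) = range.split(page_aligned_size);
    if let Some(zero_range) = zero_range {
        // Everything past the aligned end of the file is supplied as zeroed pages.
        let zero_vmo = zx::Vmo::create(zero_range.len()).unwrap();
        zero_range.supply_pages(&zero_vmo, 0);
    }
    if let Some(read_range) = read_range {
        // The in-bounds half must also be answered; a real implementation reads it from disk.
        read_range.report_failure(zx::Status::NOT_SUPPORTED);
    }
}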

impl<T: PagerBacked, U: PagerRequestType> Drop for PagerRange<T, U> {
    fn drop(&mut self) {
        if let Some(inner) = &self.inner {
            let request_type = U::request_type_name();
            let range = self.range.clone();
            let key = inner.file.pager_packet_receiver_registration().key();
            if cfg!(debug_assertions) {
                // If this object is being dropped as part of a panic then avoid panicking again.
                // Dropping pager packets when fxfs is crashing is acceptable. Panicking again would
                // only clutter the logs.
                if !std::thread::panicking() {
                    panic!(
                        "PagerRange was dropped without sending a response, \
                        request_type={request_type}, range={range:?}, key={key}",
                    );
                }
            } else {
                error!(
                    "PagerRange was dropped without sending a response, \
                    request_type={request_type}, range={range:?}, key={key}",
                );
                inner.file.pager().report_failure(inner.file.vmo(), range, zx::Status::BAD_STATE);
            }
        }
    }
}

/// An iterator similar to `std::slice::Chunks` which yields `PagerRange` objects.
/// `PagerRangeChunksIter` will panic if it's dropped without being fully consumed.
pub struct PagerRangeChunksIter<T: PagerBacked, U: PagerRequestType> {
    start: u64,
    end: u64,
    chunk_size: u64,
    /// The file and locks/references that need to survive the request.
    inner: Option<PagerRangeInner<Arc<T>>>,
    _request_type: PhantomData<U>,
}

impl<T: PagerBacked, U: PagerRequestType> Iterator for PagerRangeChunksIter<T, U> {
    type Item = PagerRange<T, U>;
    fn next(&mut self) -> Option<Self::Item> {
        if self.start == self.end {
            None
        } else if self.start + self.chunk_size >= self.end {
            let next = Self::Item {
                range: self.start..self.end,
                inner: self.inner.take(),
                _request_type: PhantomData,
            };
            self.start = self.end;
            Some(next)
        } else {
            let next_end = self.start + self.chunk_size;
            let next = Self::Item {
                range: self.start..next_end,
                inner: self.inner.clone(),
                _request_type: PhantomData,
            };
            self.start = next_end;
            Some(next)
        }
    }
}

impl<T: PagerBacked, U: PagerRequestType> Drop for PagerRangeChunksIter<T, U> {
    fn drop(&mut self) {
        if self.start != self.end {
            let request_type = U::request_type_name();
            let remaining = self.start..self.end;
            let inner = self.inner.take().unwrap();
            let key = inner.file.pager_packet_receiver_registration().key();
            if cfg!(debug_assertions) {
                // If this object is being dropped as part of a panic then avoid panicking again.
                // Dropping pager packets when fxfs is crashing is acceptable. Panicking again would
                // only clutter the logs.
                if !std::thread::panicking() {
                    panic!(
                        "PagerRangeChunksIter was dropped without being fully consumed, \
                    request_type={request_type}, remaining={remaining:?}, key={key}",
                    );
                }
            } else {
                error!(
                    "PagerRangeChunksIter was dropped without being fully consumed, \
                    request_type={request_type}, remaining={remaining:?}, key={key}",
                );
                inner.file.pager().report_failure(
                    inner.file.vmo(),
                    remaining,
                    zx::Status::BAD_STATE,
                );
            }
        }
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use futures::StreamExt;
    use futures::channel::mpsc;
    use fxfs_macros::ToWeakNode;

    #[derive(Clone, Debug, PartialEq, Eq)]
    enum PagerRequest {
        PageIn(Range<u64>),
        Dirty(Range<u64>),
    }

    #[derive(ToWeakNode)]
    struct MockFile {
        vmo: zx::Vmo,
        pager_packet_receiver_registration: PagerPacketReceiverRegistration<Self>,
        pager: Arc<Pager>,
        /// Pager requests get logged so we can compare actual calls to expectations.
        pager_requests: Mutex<Vec<PagerRequest>>,
    }

    impl MockFile {
        fn new(pager: Arc<Pager>) -> Arc<Self> {
            Self::new_with_size_and_type(pager, page_size(), zx::VmoOptions::UNBOUNDED)
        }

        fn new_with_size_and_type(
            pager: Arc<Pager>,
            size: u64,
            vmo_type: zx::VmoOptions,
        ) -> Arc<Self> {
            Arc::new_cyclic(|weak| {
                let (vmo, pager_packet_receiver_registration) = pager
                    .create_vmo(weak.clone(), size, vmo_type | zx::VmoOptions::TRAP_DIRTY)
                    .unwrap();
                Self {
                    pager,
                    vmo,
                    pager_packet_receiver_registration,
                    pager_requests: Default::default(),
                }
            })
        }

        // Returns the pager requests received for this file.
        fn pager_requests(&self, reset: bool) -> Vec<PagerRequest> {
            if reset {
                std::mem::take(&mut *self.pager_requests.lock())
            } else {
                self.pager_requests.lock().clone()
            }
        }
    }

    impl FxNode for MockFile {
        fn object_id(&self) -> u64 {
            unimplemented!();
        }

        fn parent(&self) -> Option<Arc<crate::directory::FxDirectory>> {
            unimplemented!();
        }

        fn set_parent(&self, _parent: Arc<crate::directory::FxDirectory>) {
            unimplemented!();
        }

        fn open_count_add_one(&self) {
            unimplemented!();
        }

        fn open_count_sub_one(self: Arc<Self>) {
            unimplemented!();
        }

        fn object_descriptor(&self) -> fxfs::object_store::ObjectDescriptor {
            unimplemented!();
        }
    }

    impl PagerBacked for MockFile {
        fn pager(&self) -> &Pager {
            &self.pager
        }

        fn pager_packet_receiver_registration(&self) -> &PagerPacketReceiverRegistration<Self> {
            &self.pager_packet_receiver_registration
        }

        fn vmo(&self) -> &zx::Vmo {
            &self.vmo
        }

        fn page_in(self: Arc<Self>, range: PageInRange<Self>) {
            let aux_vmo = zx::Vmo::create(range.len()).unwrap();
            self.pager_requests.lock().push(PagerRequest::PageIn(range.range()));
            range.supply_pages(&aux_vmo, 0);
        }

        fn mark_dirty(self: Arc<Self>, range: MarkDirtyRange<Self>) {
            self.pager_requests.lock().push(PagerRequest::Dirty(range.range()));
            range.dirty_pages();
        }

        fn on_zero_children(self: Arc<Self>) {}

        fn byte_size(&self) -> u64 {
            unimplemented!();
        }
        async fn aligned_read(
            &self,
            _aligned_byte_range: std::ops::Range<u64>,
        ) -> Result<buffer::Buffer<'_>, Error> {
            unimplemented!();
        }
    }

    #[derive(ToWeakNode)]
    struct OnZeroChildrenFile {
        pager: Arc<Pager>,
        vmo: zx::Vmo,
        pager_packet_receiver_registration: PagerPacketReceiverRegistration<Self>,
        sender: Mutex<mpsc::UnboundedSender<()>>,
    }

    impl OnZeroChildrenFile {
        fn new(pager: Arc<Pager>, sender: mpsc::UnboundedSender<()>) -> Arc<Self> {
            Arc::new_cyclic(|weak| {
                let (vmo, pager_packet_receiver_registration) =
                    pager.create_vmo(weak.clone(), page_size(), zx::VmoOptions::empty()).unwrap();
                Self { pager, vmo, pager_packet_receiver_registration, sender: Mutex::new(sender) }
            })
        }
    }

    impl FxNode for OnZeroChildrenFile {
        fn object_id(&self) -> u64 {
            unimplemented!();
        }

        fn parent(&self) -> Option<Arc<crate::directory::FxDirectory>> {
            unimplemented!();
        }

        fn set_parent(&self, _parent: Arc<crate::directory::FxDirectory>) {
            unimplemented!();
        }

        fn open_count_add_one(&self) {
            unimplemented!();
        }

        fn open_count_sub_one(self: Arc<Self>) {
            unimplemented!();
        }

        fn object_descriptor(&self) -> fxfs::object_store::ObjectDescriptor {
            unimplemented!();
        }
    }

    impl PagerBacked for OnZeroChildrenFile {
        fn pager(&self) -> &Pager {
            &self.pager
        }

        fn pager_packet_receiver_registration(&self) -> &PagerPacketReceiverRegistration<Self> {
            &self.pager_packet_receiver_registration
        }

        fn vmo(&self) -> &zx::Vmo {
            &self.vmo
        }

        fn page_in(self: Arc<Self>, _range: PageInRange<Self>) {
            unreachable!();
        }

        fn mark_dirty(self: Arc<Self>, _range: MarkDirtyRange<Self>) {
            unreachable!();
        }

        fn on_zero_children(self: Arc<Self>) {
            self.sender.lock().unbounded_send(()).unwrap();
        }
        fn byte_size(&self) -> u64 {
            unreachable!();
        }
        async fn aligned_read(
            &self,
            _aligned_byte_range: std::ops::Range<u64>,
        ) -> Result<buffer::Buffer<'_>, Error> {
            unreachable!();
        }
    }

    #[fuchsia::test(threads = 2)]
    async fn test_watch_for_zero_children() {
        let (sender, mut receiver) = mpsc::unbounded();
        let scope = ExecutionScope::new();
        let pager = Arc::new(Pager::new(scope.clone()).unwrap());
        let file = OnZeroChildrenFile::new(pager.clone(), sender);
        {
            let _child_vmo = file
                .vmo()
                .create_child(
                    zx::VmoChildOptions::SNAPSHOT_AT_LEAST_ON_WRITE,
                    0,
                    file.vmo().get_content_size().unwrap(),
                )
                .unwrap();
            assert!(pager.watch_for_zero_children(file.as_ref()).unwrap());
        }
        // Wait for `on_zero_children` to be called.
        receiver.next().await.unwrap();

        scope.wait().await;
    }

    #[fuchsia::test(threads = 2)]
    async fn test_multiple_watch_for_zero_children_calls() {
        let (sender, mut receiver) = mpsc::unbounded();
        let scope = ExecutionScope::new();
        let pager = Arc::new(Pager::new(scope.clone()).unwrap());
        let file = OnZeroChildrenFile::new(pager.clone(), sender);
        {
            let _child_vmo = file
                .vmo()
                .create_child(
                    zx::VmoChildOptions::SNAPSHOT_AT_LEAST_ON_WRITE,
                    0,
                    file.vmo().get_content_size().unwrap(),
                )
                .unwrap();
            assert!(pager.watch_for_zero_children(file.as_ref()).unwrap());
            // `watch_for_zero_children` will return false when it's already watching.
            assert!(!pager.watch_for_zero_children(file.as_ref()).unwrap());
        }
        receiver.next().await.unwrap();

        // The pager stops listening for VMO_ZERO_CHILDREN once the signal fires. Calling
        // `watch_for_zero_children` afterwards should return true again because watching had
        // stopped.
        assert!(pager.watch_for_zero_children(file.as_ref()).unwrap());

        file.pager_packet_receiver_registration.stop_watching_for_zero_children();

        scope.wait().await;
    }

    #[fuchsia::test(threads = 2)]
    async fn test_status_code_mapping() {
        #[derive(ToWeakNode)]
        struct StatusCodeFile {
            vmo: zx::Vmo,
            pager: Arc<Pager>,
            status_code: Mutex<zx::Status>,
            pager_packet_receiver_registration: PagerPacketReceiverRegistration<Self>,
        }

        impl FxNode for StatusCodeFile {
            fn object_id(&self) -> u64 {
                unimplemented!();
            }

            fn parent(&self) -> Option<Arc<crate::directory::FxDirectory>> {
                unimplemented!();
            }

            fn set_parent(&self, _parent: Arc<crate::directory::FxDirectory>) {
                unimplemented!();
            }

            fn open_count_add_one(&self) {
                unimplemented!();
            }

            fn open_count_sub_one(self: Arc<Self>) {
                unimplemented!();
            }

            fn object_descriptor(&self) -> fxfs::object_store::ObjectDescriptor {
                unimplemented!();
            }
        }

        impl PagerBacked for StatusCodeFile {
            fn pager(&self) -> &Pager {
                &self.pager
            }

            fn pager_packet_receiver_registration(&self) -> &PagerPacketReceiverRegistration<Self> {
                &self.pager_packet_receiver_registration
            }

            fn vmo(&self) -> &zx::Vmo {
                &self.vmo
            }

            fn page_in(self: Arc<Self>, range: PageInRange<Self>) {
                range.report_failure(*self.status_code.lock());
            }

            fn mark_dirty(self: Arc<Self>, _range: MarkDirtyRange<Self>) {
                unreachable!();
            }

            fn on_zero_children(self: Arc<Self>) {
                unreachable!();
            }

            fn byte_size(&self) -> u64 {
                unreachable!();
            }
            async fn aligned_read(
                &self,
                _aligned_byte_range: std::ops::Range<u64>,
            ) -> Result<buffer::Buffer<'_>, Error> {
                unreachable!();
            }
        }

        let scope = ExecutionScope::new();
        let pager = Arc::new(Pager::new(scope.clone()).unwrap());
        let file = Arc::new_cyclic(|weak| {
            let (vmo, pager_packet_receiver_registration) =
                pager.create_vmo(weak.clone(), page_size(), zx::VmoOptions::empty()).unwrap();
            StatusCodeFile {
                vmo,
                pager: pager.clone(),
                status_code: Mutex::new(zx::Status::INTERNAL),
                pager_packet_receiver_registration,
            }
        });

        fn check_mapping(
            file: &StatusCodeFile,
            failure_code: zx::Status,
            expected_code: zx::Status,
        ) {
            {
                *file.status_code.lock() = failure_code;
            }
            let mut buf = [0u8; 8];
            assert_eq!(file.vmo().read(&mut buf, 0).unwrap_err(), expected_code);
        }
        check_mapping(&file, zx::Status::IO_DATA_INTEGRITY, zx::Status::IO_DATA_INTEGRITY);
        check_mapping(&file, zx::Status::NO_SPACE, zx::Status::NO_SPACE);
        check_mapping(&file, zx::Status::FILE_BIG, zx::Status::BUFFER_TOO_SMALL);
        check_mapping(&file, zx::Status::IO, zx::Status::IO);
        check_mapping(&file, zx::Status::IO_DATA_LOSS, zx::Status::IO);
        check_mapping(&file, zx::Status::NOT_EMPTY, zx::Status::BAD_STATE);
        check_mapping(&file, zx::Status::BAD_STATE, zx::Status::BAD_STATE);

        scope.wait().await;
    }

    #[fuchsia::test(threads = 2)]
    async fn test_query_vmo_stats() {
        let scope = ExecutionScope::new();
        let pager = Arc::new(Pager::new(scope.clone()).unwrap());
        let file = MockFile::new(pager.clone());

        let stats = pager.query_vmo_stats(file.vmo(), PagerVmoStatsOptions::empty()).unwrap();
        // The VMO hasn't been modified yet.
        assert!(!stats.was_vmo_modified());

        file.vmo().write(&[0, 1, 2, 3, 4], 0).unwrap();
        let stats = pager.query_vmo_stats(file.vmo(), PagerVmoStatsOptions::empty()).unwrap();
        assert!(stats.was_vmo_modified());

        // Reset the stats this time.
        let stats =
            pager.query_vmo_stats(file.vmo(), PagerVmoStatsOptions::RESET_VMO_STATS).unwrap();
        // The stats weren't reset last time, so they still show that the VMO was modified.
        assert!(stats.was_vmo_modified());

        let stats = pager.query_vmo_stats(file.vmo(), PagerVmoStatsOptions::empty()).unwrap();
        assert!(!stats.was_vmo_modified());

        scope.wait().await;
    }

    #[fuchsia::test(threads = 2)]
    async fn test_query_dirty_ranges() {
        // Some notes on our paging implementation:
        //  * Fxfs uses UNBOUNDED VMOs. These are maximally sized at creation time, with the
        //    stream size holding the content length.
        //  * Like regular VMOs, all pages are initially in an unknown state. When a page is
        //    first accessed, the pager (Fxfs) will be asked to page in content.
        //  * Size can be set as a property, via set_content_size, or via set_stream_size, but
        //    only set_stream_size() should ever be used. This ensures that the tail is
        //    correctly zeroed.
        let scope = ExecutionScope::new();
        let pager = Arc::new(Pager::new(scope.clone()).unwrap());
        let file = MockFile::new_with_size_and_type(
            pager.clone(),
            page_size() + page_size() / 2,
            zx::VmoOptions::UNBOUNDED,
        );
        let mut buffer = vec![VmoDirtyRange::default(); 2];

        let page_size = page_size();
        assert_eq!(file.vmo().get_content_size().unwrap(), page_size + page_size / 2);

        let (actual, remaining) =
            pager.query_dirty_ranges(file.vmo(), 0..page_size * 100, &mut buffer).unwrap();
        assert_eq!(actual, 0);
        assert_eq!(remaining, 0);

        // Grow the VMO content size from 1.5 pages to 7.5 pages.
        file.vmo().set_stream_size(page_size * 7 + page_size / 2).unwrap();

        let (actual, remaining) =
            pager.query_dirty_ranges(file.vmo(), 0..page_size * 100, &mut buffer).unwrap();
        assert_eq!(actual, 2);
        assert_eq!(remaining, 0);
        // The second page must be assumed to contain data, so its tail is zeroed.
        assert_eq!(buffer[0].range(), page_size..page_size * 2);
        assert!(!buffer[0].is_zero_range());
        // All pages after that are marked as zero.
        assert_eq!(buffer[1].range(), page_size * 2..page_size * 8);
        assert!(buffer[1].is_zero_range());

        // We expect the tail page to have been read as part of the zeroing when we grew the size.
        // It will then be marked dirty (modified).
1326        assert_eq!(
1327            file.pager_requests(true),
1328            vec![
1329                PagerRequest::PageIn(page_size * 1..page_size * 2),
1330                PagerRequest::Dirty(page_size * 1..page_size * 2),
1331            ]
1332        );
1333
1334        // Modify the 2nd, 3rd, and 5th pages.
1335        file.vmo().write(&[1, 2, 3, 4], page_size).unwrap();
1336        file.vmo().write(&[1, 2, 3, 4], page_size * 2).unwrap();
1337        file.vmo().write(&[1, 2, 3, 4], page_size * 4).unwrap();
1338
1339        // Pages are known zero because we just grew the file.
1340        // We don't expect any page-in requests for them.
1341        assert_eq!(
1342            file.pager_requests(true),
1343            vec![
1344                PagerRequest::Dirty(page_size * 2..page_size * 3),
1345                PagerRequest::Dirty(page_size * 4..page_size * 5)
1346            ]
1347        );

        let (actual, remaining) =
            pager.query_dirty_ranges(file.vmo(), 0..page_size * 7, &mut buffer).unwrap();
        assert_eq!(actual, 2);
        assert_eq!(remaining, 2);
        // Second and third pages (non-zero)
        assert_eq!(buffer[0].range(), page_size..(page_size * 3));
        assert!(!buffer[0].is_zero_range());
        // Fourth page is zero.
        assert_eq!(buffer[1].range(), (page_size * 3)..(page_size * 4));
        assert!(buffer[1].is_zero_range());

        let (actual, remaining) = pager
            .query_dirty_ranges(file.vmo(), page_size * 4..page_size * 7, &mut buffer)
            .unwrap();
        assert_eq!(actual, 2);
        assert_eq!(remaining, 0);
        // Fifth page (non-zero)
        assert_eq!(buffer[0].range(), (page_size * 4)..(page_size * 5));
        assert!(!buffer[0].is_zero_range());
        // Rest of the VMO is zero.
        assert_eq!(buffer[1].range(), (page_size * 5)..(page_size * 7));
        assert!(buffer[1].is_zero_range());

        // Read the 4th page.
        let mut read_buf = vec![0u8; page_size as usize];
        file.vmo().read(&mut read_buf, page_size * 3).expect("read");
        let expected = vec![0u8; page_size as usize];
        assert_eq!(read_buf, expected);
        assert_eq!(file.pager_requests(true), vec![]);

        scope.wait().await;
    }
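
    // A minimal sketch (not part of the original suite) of how the `(actual, remaining)`
    // contract exercised above can be used to walk every dirty range with a fixed-size
    // buffer: re-query past the last returned range until `remaining` reaches zero.
    // `collect_dirty_ranges` is a hypothetical helper name, assuming the
    // `query_dirty_ranges` signature used throughout these tests.
    #[allow(dead_code)]
    fn collect_dirty_ranges(
        pager: &Pager,
        vmo: &zx::Vmo,
        range: Range<u64>,
    ) -> Vec<VmoDirtyRange> {
        let mut buffer = vec![VmoDirtyRange::default(); 2];
        let mut results = Vec::new();
        let mut start = range.start;
        loop {
            let (actual, remaining) =
                pager.query_dirty_ranges(vmo, start..range.end, &mut buffer).unwrap();
            results.extend_from_slice(&buffer[..actual as usize]);
            if remaining == 0 || actual == 0 {
                break;
            }
            // Resume the query just past the last range returned.
            start = results.last().unwrap().range().end;
        }
        results
    }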

    #[fuchsia::test(threads = 2)]
    async fn test_zero_grown_vmo() {
        // When a VMO's content size is explicitly grown, check that the new content is zeroed.
        let scope = ExecutionScope::new();
        let pager = Arc::new(Pager::new(scope.clone()).unwrap());
        let file = MockFile::new(pager.clone());

        let write_buf = vec![0xff; page_size() as usize * 2];
        file.vmo().set_stream_size(page_size() * 2).expect("grow");
        file.vmo().write(&write_buf, 0).expect("write");
        let mut read_buf = vec![0u8; page_size() as usize * 2];
        // The tail beyond the content size will be written.
        file.vmo().read(&mut read_buf, 0).expect("read");
        assert_eq!(read_buf, write_buf);

        // The tail beyond the new content size should be zeroed.
        file.vmo().set_stream_size(page_size() + 1).expect("shrink");
        file.vmo().write(&[0xff; 3], page_size() + 2).expect("write after shrink");
        // To make sure the above content size change actually zeroed data, we grow again.
        file.vmo().set_stream_size(page_size() + 4).expect("grow again");
        let mut read_buf = vec![0u8; page_size() as usize];
        file.vmo().read(&mut read_buf, page_size()).expect("read");
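        // Only the byte at offset page_size() should survive: it was within the content
        // size (page_size() + 1) when we shrank, and everything past it, including the
        // bytes written beyond the content size, must have been zeroed.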
        let mut expected = vec![0u8; page_size() as usize];
        expected[0] = 0xff;
        assert_eq!(read_buf, expected);

        scope.wait().await;
    }

    #[fuchsia::test]
    async fn test_pager_range_chunks_iter_chunks() {
        let scope = ExecutionScope::new();
        let pager = Arc::new(Pager::new(scope).unwrap());
        let file = MockFile::new(pager.clone());

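        // chunks(n) yields n-byte sub-ranges; the final chunk is truncated at the end
        // of the parent range (here, a single page).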
        let pager_range = PageInRange::new(0..page_size() * 5, file, Epoch::global().guard());
        let ranges: Vec<Range<u64>> = pager_range
            .chunks(page_size() * 2)
            .map(|pager_range| {
                let range = pager_range.range();
                pager_range.consume();
                range
            })
            .collect();
        assert_eq!(
            ranges,
            [
                0..page_size() * 2,
                page_size() * 2..page_size() * 4,
                page_size() * 4..page_size() * 5
            ]
        );
    }

    #[fuchsia::test]
    async fn test_pager_range_split() {
        let scope = ExecutionScope::new();
        let pager = Arc::new(Pager::new(scope).unwrap());
        let file = MockFile::new(pager.clone());

        let pager_range = PageInRange::new(0..page_size() * 10, file, Epoch::global().guard());
        let (left, right) = pager_range.split(page_size() * 5);
        let (left, right) = (left.unwrap(), right.unwrap());
        assert_eq!(left.range(), 0..page_size() * 5);
        assert_eq!(right.range(), page_size() * 5..page_size() * 10);

        left.consume();
        right.consume();
    }

    #[fuchsia::test]
    #[should_panic(expected = "0..8192 is not a subset of 0..4096")]
    async fn test_pager_range_bad_expand_panics() {
        let scope = ExecutionScope::new();
        let pager = Arc::new(Pager::new(scope).unwrap());
        let file = MockFile::new(pager.clone());

        let pager_range = PageInRange::new(0..page_size() * 2, file, Epoch::global().guard());
        pager_range.expand(0..page_size()).consume();
    }

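    // A test file whose page_in and mark_dirty behaviour is injected per test. The
    // tests below use it to exercise the PageInRange/MarkDirtyRange response contract.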
    #[derive(ToWeakNode)]
    struct PagerRangeTestFile {
        vmo: zx::Vmo,
        pager_packet_receiver_registration: PagerPacketReceiverRegistration<Self>,
        pager: Pager,
        page_in_fn: Box<dyn Fn(PageInRange<Self>) + Send + Sync + 'static>,
        mark_dirty_fn: Box<dyn Fn(MarkDirtyRange<Self>) + Send + Sync + 'static>,
    }

    impl PagerRangeTestFile {
        fn new<
            F1: Fn(PageInRange<Self>) + Send + Sync + 'static,
            F2: Fn(MarkDirtyRange<Self>) + Send + Sync + 'static,
        >(
            page_in_fn: F1,
            mark_dirty_fn: F2,
        ) -> Arc<Self> {
            Arc::new_cyclic(|weak| {
                let pager = Pager::new(ExecutionScope::new()).unwrap();
                let (vmo, pager_packet_receiver_registration) = pager
                    .create_vmo(weak.clone(), page_size() * 2, zx::VmoOptions::TRAP_DIRTY)
                    .unwrap();
                Self {
                    vmo,
                    pager_packet_receiver_registration,
                    pager,
                    page_in_fn: Box::new(page_in_fn),
                    mark_dirty_fn: Box::new(mark_dirty_fn),
                }
            })
        }
    }

    impl FxNode for PagerRangeTestFile {
        fn object_id(&self) -> u64 {
            1
        }

        fn parent(&self) -> Option<Arc<crate::directory::FxDirectory>> {
            unimplemented!()
        }

        fn set_parent(&self, _parent: Arc<crate::directory::FxDirectory>) {
            unimplemented!()
        }

        fn open_count_add_one(&self) {
            unimplemented!()
        }

        fn open_count_sub_one(self: Arc<Self>) {
            unimplemented!()
        }

        fn object_descriptor(&self) -> fxfs::object_store::ObjectDescriptor {
            unimplemented!()
        }
    }

    impl PagerBacked for PagerRangeTestFile {
        fn pager(&self) -> &Pager {
            &self.pager
        }

        fn pager_packet_receiver_registration(&self) -> &PagerPacketReceiverRegistration<Self> {
            &self.pager_packet_receiver_registration
        }

        fn vmo(&self) -> &zx::Vmo {
            &self.vmo
        }

        fn page_in(self: Arc<Self>, range: PageInRange<Self>) {
            (self.page_in_fn)(range)
        }

        fn mark_dirty(self: Arc<Self>, range: MarkDirtyRange<Self>) {
            (self.mark_dirty_fn)(range)
        }

        fn on_zero_children(self: Arc<Self>) {}

        fn byte_size(&self) -> u64 {
            unimplemented!();
        }

        async fn aligned_read(
            &self,
            _range: std::ops::Range<u64>,
        ) -> Result<buffer::Buffer<'_>, Error> {
            unimplemented!();
        }
    }

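    // Baseline handlers that satisfy the pager protocol: every page-in request is
    // answered with supply_pages() and every dirty notification with dirty_pages().
    // The tests below swap one of them out to exercise the failure and drop paths.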
    fn real_supply_pages(range: PageInRange<PagerRangeTestFile>) {
        let aux_vmo = zx::Vmo::create(range.len()).unwrap();
        range.supply_pages(&aux_vmo, 0);
    }

    fn real_mark_dirty(range: MarkDirtyRange<PagerRangeTestFile>) {
        range.dirty_pages();
    }

    #[fuchsia::test(threads = 2)]
    async fn test_page_in_range_supply_pages() {
        let file = PagerRangeTestFile::new(real_supply_pages, real_mark_dirty);

        let mut data = vec![0; 20];
        file.vmo.read(&mut data, 0).unwrap();
    }

    #[fuchsia::test(threads = 2)]
    async fn test_page_in_range_report_failure() {
        let file = PagerRangeTestFile::new(
            |range| {
                range.report_failure(zx::Status::IO_DATA_INTEGRITY);
            },
            real_mark_dirty,
        );

        let mut data = vec![0; 20];
        let err = file.vmo.read(&mut data, 0).unwrap_err();
        assert_eq!(err, zx::Status::IO_DATA_INTEGRITY);
    }

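    // Dropping a pager range without responding panics in debug builds; in release
    // builds the in-flight request is failed and the client observes BAD_STATE.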
    #[cfg(debug_assertions)]
    #[fuchsia::test(threads = 2)]
    #[should_panic(expected = "PagerRange was dropped without sending a response")]
    async fn test_page_in_range_dropped() {
        let file = PagerRangeTestFile::new(|_| {}, real_mark_dirty);

        let mut data = vec![0; 20];
        file.vmo.read(&mut data, 0).unwrap_err();
    }

    #[cfg(not(debug_assertions))]
    #[fuchsia::test(threads = 2)]
    async fn test_page_in_range_dropped() {
        let file = PagerRangeTestFile::new(|_| {}, real_mark_dirty);

        let mut data = vec![0; 20];
        let err = file.vmo.read(&mut data, 0).unwrap_err();
        assert_eq!(err, zx::Status::BAD_STATE);
    }

    #[fuchsia::test(threads = 2)]
    async fn test_mark_dirty_range_dirty_pages() {
        let file = PagerRangeTestFile::new(real_supply_pages, real_mark_dirty);

        let data = vec![5; 20];
        file.vmo.write(&data, 0).unwrap();
    }

    #[fuchsia::test(threads = 2)]
    async fn test_mark_dirty_range_report_failure() {
        let file = PagerRangeTestFile::new(real_supply_pages, |range| {
            range.report_failure(zx::Status::IO_DATA_INTEGRITY);
        });

        let data = vec![5; 20];
        let err = file.vmo.write(&data, 0).unwrap_err();
        assert_eq!(err, zx::Status::IO_DATA_INTEGRITY);
    }

    #[cfg(debug_assertions)]
    #[fuchsia::test(threads = 2)]
    #[should_panic(expected = "PagerRange was dropped without sending a response")]
    async fn test_mark_dirty_range_dropped() {
        let file = PagerRangeTestFile::new(real_supply_pages, |_| {});

        let data = vec![5; 20];
        file.vmo.write(&data, 0).unwrap_err();
    }

    #[cfg(not(debug_assertions))]
    #[fuchsia::test(threads = 2)]
    async fn test_mark_dirty_range_dropped() {
        let file = PagerRangeTestFile::new(real_supply_pages, |_| {});

        let data = vec![5; 20];
        let err = file.vmo.write(&data, 0).unwrap_err();
        assert_eq!(err, zx::Status::BAD_STATE);
    }

    #[fuchsia::test(threads = 2)]
    async fn test_pager_range_chunks_iter_consumed() {
        let file = PagerRangeTestFile::new(
            |range| {
                let aux_vmo = zx::Vmo::create(page_size()).unwrap();
                range.expand(0..page_size() * 2).chunks(page_size()).for_each(|range| {
                    range.supply_pages(&aux_vmo, 0);
                });
            },
            real_mark_dirty,
        );

        let mut data = vec![0; 20];
        file.vmo.read(&mut data, 0).unwrap();
    }

    fn partial_supply_pages(range: PageInRange<PagerRangeTestFile>) {
        let aux_vmo = zx::Vmo::create(page_size()).unwrap();
        // Expand the range to 2 pages and only supply the first page, dropping the iterator without
        // fully consuming it.
        range.expand(0..page_size() * 2).chunks(page_size()).take(1).for_each(|range| {
            range.supply_pages(&aux_vmo, 0);
        });
    }

    #[cfg(debug_assertions)]
    #[fuchsia::test(threads = 2)]
    #[should_panic(expected = "PagerRangeChunksIter was dropped without being fully consumed")]
    async fn test_pager_range_chunks_iter_dropped() {
        let file = PagerRangeTestFile::new(partial_supply_pages, real_mark_dirty);

        let mut data = vec![0; 20];
        // Ask for the 2nd page. The range will be expanded to the first 2 pages. The first page
        // will succeed and the second page will be dropped.
        file.vmo.read(&mut data, page_size()).unwrap_err();
    }

    #[cfg(not(debug_assertions))]
    #[fuchsia::test(threads = 2)]
    async fn test_pager_range_chunks_iter_dropped() {
        let file = PagerRangeTestFile::new(partial_supply_pages, real_mark_dirty);

        let mut data = vec![0; 20];
        // Ask for the 2nd page. The range will be expanded to the first 2 pages. The first page
        // will succeed and the second page will be dropped.
        let err = file.vmo.read(&mut data, page_size()).unwrap_err();
        assert_eq!(err, zx::Status::BAD_STATE);
    }

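    // A minimal sketch (not part of the original suite) of the correct way to answer a
    // page-in only partially: every byte of the range must still receive a response, so
    // the remainder is failed explicitly rather than dropped. `supply_first_page_only`
    // is a hypothetical helper; it assumes split() behaves as in test_pager_range_split.
    #[allow(dead_code)]
    fn supply_first_page_only(range: PageInRange<PagerRangeTestFile>) {
        let aux_vmo = zx::Vmo::create(page_size()).unwrap();
        let split_at = (range.range().start + page_size()).min(range.range().end);
        let (first, rest) = range.split(split_at);
        if let Some(first) = first {
            first.supply_pages(&aux_vmo, 0);
        }
        if let Some(rest) = rest {
            // Failing the remainder still counts as a response, unlike dropping it.
            rest.report_failure(zx::Status::IO);
        }
    }
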
    #[fuchsia::test(threads = 2)]
    async fn test_grow_zeroes_new_bytes() {
        // We expect that when we grow a file, the pages between the old and the new size
        // are zeroed. Reads and writes to these pages after growing a file should NOT
        // trigger any page-in requests.
        let scope = ExecutionScope::new();
        let pager = Arc::new(Pager::new(scope.clone()).unwrap());
        let page_size = page_size();
        let vmo_size: u64 = page_size * 2;
        let file_a =
            MockFile::new_with_size_and_type(pager.clone(), vmo_size, zx::VmoOptions::RESIZABLE);
        let file_b =
            MockFile::new_with_size_and_type(pager.clone(), vmo_size, zx::VmoOptions::UNBOUNDED);
        let mut buffer = vec![VmoDirtyRange::default(); 3];

        assert_eq!(file_a.vmo().get_stream_size().unwrap(), page_size * 2);
        assert_eq!(file_b.vmo().get_stream_size().unwrap(), page_size * 2);

        // A page-in is expected for these pre-existing pages.
        let mut read_buf = vec![0u8; page_size as usize];
        file_a.vmo().read(&mut read_buf, page_size).expect("read a");
        assert_eq!(
            file_a.pager_requests(true),
            vec![PagerRequest::PageIn(page_size..page_size * 2)]
        );
        file_b.vmo().read(&mut read_buf, page_size).expect("read b");
        assert_eq!(
            file_b.pager_requests(true),
            vec![PagerRequest::PageIn(page_size..page_size * 2)]
        );

        // Grow the VMO size and confirm the intermediate pages (2..8) are zero.
        let vmo_size = page_size * 8;
        file_a.vmo().set_size(vmo_size).unwrap();
        file_b.vmo().set_stream_size(vmo_size).unwrap();

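        // In these tests, `options: 1` corresponds to a zero range (is_zero_range()
        // returns true) and `options: 0` to a range containing data.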
        assert_eq!(
            pager.query_dirty_ranges(file_a.vmo(), 0..vmo_size, &mut buffer).unwrap(),
            (1, 0)
        );
        assert_eq!(
            buffer[0],
            VmoDirtyRange { offset: page_size * 2, length: page_size * 6, options: 1 },
        );
        assert_eq!(
            pager.query_dirty_ranges(file_b.vmo(), 0..vmo_size, &mut buffer).unwrap(),
            (1, 0)
        );
        assert_eq!(
            buffer[0],
            VmoDirtyRange { offset: page_size * 2, length: page_size * 6, options: 1 },
        );

        // The extra pages are all zero. We shouldn't see any page_in requests.
        let mut read_buf = vec![0u8; page_size as usize * 6];
        file_a.vmo().read(&mut read_buf, page_size * 2).expect("read a");
        assert_eq!(file_a.pager_requests(true), vec![]);
        file_b.vmo().read(&mut read_buf, page_size * 2).expect("read b");
        assert_eq!(file_b.pager_requests(true), vec![]);

        // Set the size again (it is unchanged, so this is a no-op) and check that the
        // dirty ranges are unaffected.
        let vmo_size = page_size * 8;
        file_a.vmo().set_size(vmo_size).unwrap();
        file_b.vmo().set_stream_size(vmo_size).unwrap();
        assert_eq!(
            pager.query_dirty_ranges(file_a.vmo(), 0..vmo_size, &mut buffer).unwrap(),
            (1, 0)
        );
        assert_eq!(
            buffer[0],
            VmoDirtyRange { offset: page_size * 2, length: page_size * 6, options: 1 },
        );
        assert_eq!(
            pager.query_dirty_ranges(file_b.vmo(), 0..vmo_size, &mut buffer).unwrap(),
            (1, 0)
        );
        assert_eq!(
            buffer[0],
            VmoDirtyRange { offset: page_size * 2, length: page_size * 6, options: 1 },
        );
        // No pager requests; the pages are still assumed zero.
        assert_eq!(file_a.pager_requests(true), vec![]);
        assert_eq!(file_b.pager_requests(true), vec![]);

        // Modifying a page in the grown region should trigger a dirty message to the pager.
        file_b.vmo().write(&[1; 10], page_size * 2).unwrap();
        assert_eq!(
            file_b.pager_requests(true),
            vec![PagerRequest::Dirty(page_size * 2..page_size * 3)],
        );

        // Shrink to 4 pages, then append a page via zx_stream_write (WRITE).
        let vmo_size = page_size * 4;
        file_b.vmo().set_stream_size(vmo_size).unwrap();
        let stream =
            zx::Stream::create(zx::StreamOptions::MODE_WRITE, file_b.vmo(), page_size * 4).unwrap();
        stream.write(zx::StreamWriteOptions::empty(), &vec![10; page_size as usize]).unwrap();
        assert_eq!(
            file_b.pager_requests(true),
            vec![PagerRequest::Dirty(page_size * 4..page_size * 5)],
        );

        // Append via zx_stream_write (APPEND); the write dirties one more page.
        let stream = zx::Stream::create(
            zx::StreamOptions::MODE_WRITE | zx::StreamOptions::MODE_APPEND,
            file_b.vmo(),
            page_size * 5,
        )
        .unwrap();
        stream.write(zx::StreamWriteOptions::empty(), &[10; 1024]).unwrap();
        assert_eq!(
            file_b.pager_requests(true),
            vec![PagerRequest::Dirty(page_size * 5..page_size * 6)],
        );

        scope.wait().await;
    }

    #[fuchsia::test(threads = 2)]
    async fn test_pathological_shrink_unbounded_vmo() {
        let scope = ExecutionScope::new();
        let pager = Arc::new(Pager::new(scope.clone()).unwrap());
        let page_size = page_size();
        let vmo_size: u64 = page_size * 25600; // 100MiB
        let file =
            MockFile::new_with_size_and_type(pager.clone(), vmo_size, zx::VmoOptions::UNBOUNDED);
        let mut buffer = vec![VmoDirtyRange::default(); 10];

        assert_eq!(file.vmo().get_stream_size().unwrap(), vmo_size);

        // Dirty the file by writing a few bytes every 256 bytes.
        for i in 0..vmo_size / 256 {
            let data = vec![5; 20];
            file.vmo.write(&data, i * 256).expect("write failed");
        }

        // Shrink in small steps to check that last-page truncation works as expected.
        for i in (0..25600u64 / 1024).rev() {
            file.vmo().set_stream_size(i * 1024 + page_size / 2).unwrap();
        }

        assert_eq!(pager.query_dirty_ranges(file.vmo(), 0..vmo_size, &mut buffer).unwrap(), (1, 0));
        assert_eq!(buffer[0..1], [VmoDirtyRange { offset: 0, length: page_size, options: 0 }]);

        scope.wait().await;
    }

    #[fuchsia::test(threads = 2)]
    async fn test_pathological_shrink_unbounded_vmo_with_gaps() {
        let scope = ExecutionScope::new();
        let pager = Arc::new(Pager::new(scope.clone()).unwrap());
        let page_size = page_size();
        let vmo_size: u64 = page_size * 25600; // 100MiB
        let file =
            MockFile::new_with_size_and_type(pager.clone(), vmo_size, zx::VmoOptions::UNBOUNDED);
        let mut buffer = vec![VmoDirtyRange::default(); 10];

        assert_eq!(file.vmo().get_stream_size().unwrap(), vmo_size);

        // Write to every second page.
        for offset in (0u64..vmo_size).step_by((page_size * 2) as usize) {
            let data = vec![5; 20];
            file.vmo.write(&data, offset).expect("write failed");
        }
        // Every second page should be dirty.
        let (actual, remaining) =
            pager.query_dirty_ranges(file.vmo(), 0..vmo_size, &mut buffer).unwrap();
        assert_eq!(actual + remaining, 25600 / 2);

        // Avoid page-aligned sizes to ensure we test the partial-page code paths.
        let mut offset = vmo_size.saturating_sub(5 * page_size - 2);
        // Shrink by 5 pages, then 4 pages. This covers all possible arrangements of the
        // start/end being on zero and non-zero pages.
        'outer: loop {
            for delta in [5 * page_size, 4 * page_size] {
                file.vmo().set_stream_size(offset).unwrap();
                assert_eq!(
                    pager.query_dirty_ranges(file.vmo(), offset..vmo_size, &mut buffer).unwrap(),
                    (1, 0)
                );
                // We do not expect to see dirty pages beyond the stream size.
                assert_eq!(
                    buffer[0..1],
                    [VmoDirtyRange {
                        offset: round_down(offset, page_size),
                        length: page_size,
                        options: 0
                    }]
                );
                offset = offset.saturating_sub(delta);
                if offset == 0 {
                    break 'outer;
                }
            }
        }

        scope.wait().await;
    }

    #[fuchsia::test(threads = 2)]
    async fn test_grow_unbounded_vmo() {
        let scope = ExecutionScope::new();
        let pager = Arc::new(Pager::new(scope.clone()).unwrap());
        let file = MockFile::new_with_size_and_type(pager.clone(), 128, zx::VmoOptions::UNBOUNDED);

        let data = vec![1; 128];
        // Overwrite the 128 bytes after the content size.
        file.vmo().write(&data, 128).expect("write failed");
        // Grow the VMO to include the newly written bytes.
        file.vmo().set_stream_size(256).unwrap();
        assert_eq!(file.vmo().get_stream_size().expect("get_stream_size"), 256);

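        // The grow must zero the tail, so the bytes written beyond the old content size
        // read back as zero.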
        let mut data = vec![0xff; 256];
        file.vmo().read(&mut data, 0).expect("read");
        let expected = vec![0; 256];
        assert_eq!(data, expected);

        file.vmo().set_stream_size(page_size() * 3).unwrap();
        let mut buffer = vec![VmoDirtyRange::default(); 10];
        assert_eq!(
            pager.query_dirty_ranges(file.vmo(), 0..page_size() * 3, &mut buffer).unwrap(),
            (2, 0)
        );
        // We expect to see only zero pages beyond the content size.
        assert_eq!(
            buffer[0..2],
            [
                VmoDirtyRange { offset: 0, length: page_size(), options: 0 },
                VmoDirtyRange { offset: page_size(), length: page_size() * 2, options: 1 },
            ]
        );

        scope.wait().await;
    }
}
1929}