// fxfs_platform/fuchsia/pager.rs
// Copyright 2021 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
use crate::fuchsia::errors::map_to_status;
use crate::fuchsia::node::FxNode;
use crate::fuchsia::profile::Recorder;
use anyhow::Error;
use bitflags::bitflags;
use fuchsia_async::epoch::{Epoch, EpochGuard};
use fuchsia_async::{self as fasync};
use fuchsia_sync::{Mutex, MutexGuard};
use fxfs::future_with_guard::FutureWithGuard;
use fxfs::log::*;
use fxfs::range::RangeExt;
use fxfs::round::{round_down, round_up};
use std::future::Future;
use std::marker::PhantomData;
use std::mem::MaybeUninit;
use std::ops::{Deref, Range};
use std::sync::atomic::{AtomicU64, Ordering};
use std::sync::{Arc, Weak};
use storage_device::buffer;
use vfs::execution_scope::ExecutionScope;
use zx::sys::zx_page_request_command_t::{ZX_PAGER_VMO_DIRTY, ZX_PAGER_VMO_READ};
use zx::{PacketContents, PagerPacket, SignalPacket};
27
/// Counts the files for which a `PagerPacketReceiver` currently holds a strong reference (i.e.
/// files being watched for `VMO_ZERO_CHILDREN`). Incremented/decremented with relaxed ordering
/// when a `FileHolder` is upgraded/downgraded.
pub static STRONG_FILE_REFS: AtomicU64 = AtomicU64::new(0);
29
/// Asks the kernel to queue a packet on the pager's port when `file`'s VMO has zero children.
/// The packet is keyed with the file's receiver registration key so it is routed back to the
/// file's `PagerPacketReceiver`.
fn watch_for_zero_children(file: &impl PagerBacked) -> Result<(), zx::Status> {
    file.vmo().wait_async(
        file.pager().executor.port(),
        file.pager_packet_receiver_registration().key(),
        zx::Signals::VMO_ZERO_CHILDREN,
        zx::WaitAsyncOpts::empty(),
    )
}
38
/// Registration of a `PagerPacketReceiver` with the executor; its key identifies the file in
/// pager and signal packets delivered on the port.
pub type PagerPacketReceiverRegistration<T> = fasync::ReceiverRegistration<PagerPacketReceiver<T>>;
40
/// A `fuchsia_async::PacketReceiver` that handles pager packets and the `VMO_ZERO_CHILDREN` signal.
pub struct PagerPacketReceiver<T> {
    // Strong while the VMO has children being watched; weak otherwise so the file can be dropped.
    file: Mutex<FileHolder<T>>,
}
45
/// A returnable lock held on the receiver.
pub struct PagerPacketReceiverLock<'a, T> {
    // Keeps the receiver's `file` mutex held for the lifetime of this object.
    _guard: MutexGuard<'a, FileHolder<T>>,
    // Whether the holder was `FileHolder::Strong` when the lock was taken.
    strong: bool,
}
51
impl<T> PagerPacketReceiverLock<'_, T> {
    /// Returns true if the receiver was installed as a strong reference.
    pub fn is_strong(&self) -> bool {
        self.strong
    }
}
58
impl<T: PagerBacked> PagerPacketReceiver<T> {
    /// Drops the strong reference to the file that might be held if
    /// `Pager::watch_for_zero_children` was called. This should only be used when forcibly dropping
    /// the file object. Calls `on_zero_children` if the strong reference was held.
    pub fn stop_watching_for_zero_children(&self) {
        let mut file = self.file.lock();
        if let FileHolder::Strong(strong) = &*file {
            // Downgrade in place while holding the lock. The `let ... else` below cannot fail
            // because we just observed the Strong variant under the same lock.
            let weak = FileHolder::Weak(Arc::downgrade(&strong));
            let FileHolder::Strong(strong) = std::mem::replace(&mut *file, weak) else {
                unreachable!();
            };
            STRONG_FILE_REFS.fetch_sub(1, Ordering::Relaxed);
            strong.on_zero_children();
        }
    }

    /// Sets the current receiver and returns the lock guard so that it can be held after the value
    /// is set. Currently this allows synchronizing open count adjustments.
    pub fn set_receiver(&self, new_receiver: &Arc<T>) -> PagerPacketReceiverLock<'_, T> {
        let mut receiver_lock = self.file.lock();
        // Replace the held reference in-place, preserving its strength (strong stays strong,
        // weak stays weak) and reporting which it was to the caller.
        let strong = match &mut *receiver_lock {
            FileHolder::Strong(arc) => {
                *arc = new_receiver.clone();
                true
            }
            FileHolder::Weak(arc) => {
                *arc = Arc::downgrade(new_receiver);
                false
            }
        };
        PagerPacketReceiverLock { _guard: receiver_lock, strong }
    }

    // Handles a ZX_PAGER_VMO_READ or ZX_PAGER_VMO_DIRTY packet by dispatching to the file's
    // `page_in` / `mark_dirty` implementation. Any other pager command is ignored.
    fn receive_pager_packet(&self, contents: PagerPacket) {
        let command = contents.command();
        if command != ZX_PAGER_VMO_READ && command != ZX_PAGER_VMO_DIRTY {
            return;
        }

        // Resolve the file (and, for reads, an epoch guard) inside a scope so the `file` mutex is
        // released before calling into the file's handlers.
        let (file, epoch_guard) = {
            let file_lock = self.file.lock();
            let file = match &*file_lock {
                FileHolder::Strong(file) => file.clone(),
                FileHolder::Weak(file) => {
                    if let Some(file) = file.upgrade() {
                        file
                    } else {
                        error!("Received a page request for a file that is closed {:?}", contents);
                        return;
                    }
                }
            };

            // Whenever a file is flushed, we must make sure existing page requests for a file are
            // completed to eliminate the possibility of supplying stale data for a file.  We solve
            // this by using a barrier when we flush to wait for outstanding page requests to
            // finish.  Technically, we only need to wait for page requests for the specific file
            // being flushed, but we should see if we need to for performance reasons first.
            let epoch_guard = match command {
                // Don't take refs for mark_dirty, it can block on flushes which block on the epoch
                // creating a deadlock. The call for awaiting epochs is `page_in_barrier` which
                // correctly implies that it should only wait on page in.
                ZX_PAGER_VMO_READ => Some(Epoch::global().guard()),
                _ => None,
            };
            (file, epoch_guard)
        };

        // The scope guard needs to be held and outlive the file Arc and the clones of it.
        let Some(_scope_guard) = file.pager().scope.try_active_guard() else {
            // If an active guard can't be acquired then the filesystem must be shutting down. Fail
            // the page request to avoid leaving the client hanging.
            file.pager().report_failure(file.vmo(), contents.range(), zx::Status::BAD_STATE);
            return;
        };
        match command {
            ZX_PAGER_VMO_READ => {
                file.clone().page_in(PageInRange::new(contents.range(), file, epoch_guard.unwrap()))
            }
            ZX_PAGER_VMO_DIRTY => {
                file.clone().mark_dirty(MarkDirtyRange::new(contents.range(), file))
            }
            _ => unreachable!("Unhandled commands are filtered above"),
        }
    }

    // Handles the VMO_ZERO_CHILDREN signal by downgrading the strong file reference, or re-arming
    // the watch if children reappeared in the meantime.
    fn receive_signal_packet(&self, signals: SignalPacket) {
        assert!(signals.observed().contains(zx::Signals::VMO_ZERO_CHILDREN));

        // Check to see if there really are no children (which is necessary to avoid races) and, if
        // so, replace the strong reference with a weak one and call on_zero_children on the node.
        // If the file does have children, this asks the kernel to send us the ON_ZERO_CHILDREN
        // notification for the file.
        let mut file = self.file.lock();
        if let FileHolder::Strong(strong) = &*file {
            // If the last strong reference to the Arc is dropped here, then FxVolume's shutdown
            // won't wait for the inner node object to be dropped. Taking an active guard around
            // dropping the strong reference forces the FxVolume to wait for the file to be dropped.
            // If the scope has begun shutdown then we can't take an active guard, so instead we do
            // nothing here and the strong reference in the FileHolder will be removed by calling
            // `FxNode.terminate()` as part of `NodeCache.terminate()` in the FxVolume termination
            // thread.
            let Some(_guard) = strong.pager().scope.try_active_guard() else {
                info!("Ignoring zero-children notification due to shutting down");
                return;
            };
            match strong.vmo().info() {
                Ok(info) => {
                    if info.num_children == 0 {
                        let weak = FileHolder::Weak(Arc::downgrade(&strong));
                        let FileHolder::Strong(strong) = std::mem::replace(&mut *file, weak) else {
                            unreachable!();
                        };
                        STRONG_FILE_REFS.fetch_sub(1, Ordering::Relaxed);
                        strong.on_zero_children();
                    } else {
                        // There's not much we can do here if this fails, so we panic.
                        watch_for_zero_children(strong.as_ref()).unwrap();
                    }
                }
                Err(e) => error!(error:? = e; "Vmo::info failed"),
            }
        }
    }
}
184
185impl<T: PagerBacked> fasync::PacketReceiver for PagerPacketReceiver<T> {
186    fn receive_packet(&self, packet: zx::Packet) {
187        match packet.contents() {
188            PacketContents::Pager(contents) => {
189                self.receive_pager_packet(contents);
190            }
191            PacketContents::SignalOne(signals) => {
192                self.receive_signal_packet(signals);
193            }
194            _ => unreachable!(), // We don't expect any other kinds of packets.
195        }
196    }
197}
198
/// Owns the kernel pager object and the per-volume state used to service page requests.
pub struct Pager {
    // The underlying kernel pager object.
    pager: zx::Pager,
    // Used to guard spawned page-in tasks (and packet handling) against volume termination.
    scope: ExecutionScope,
    // Executor whose port receives pager and signal packets.
    executor: fasync::EHandle,
    // Profile recorder for recording page-ins; `None` when not recording.
    recorder: Mutex<Option<Box<dyn Recorder>>>,
}
205
/// FileHolder is used to retain either a strong or a weak reference to a file.  If there are any
/// child VMOs that have been shared, then we will have a strong reference which is required to keep
/// the file alive.  When we detect that there are no more children, we can downgrade to a weak
/// reference which will allow the file to be cleaned up if there are no other uses.
enum FileHolder<T> {
    /// Keeps the file alive while child VMOs exist.
    Strong(Arc<T>),
    /// Allows the file to be dropped once no child VMOs remain.
    Weak(Weak<T>),
}
214
215/// Pager handles page requests. It is a per-volume object.
216impl Pager {
217    /// Creates a new pager.
218    pub fn new(scope: ExecutionScope) -> Result<Self, Error> {
219        Ok(Pager {
220            pager: zx::Pager::create(zx::PagerOptions::empty())?,
221            scope,
222            executor: fasync::EHandle::local(),
223            recorder: Mutex::new(None),
224        })
225    }
226
227    /// Spawns a short term task for the pager that includes a guard that will prevent termination.
228    fn spawn(&self, task: impl Future<Output = ()> + Send + 'static) {
229        if let Some(guard) = self.scope.try_active_guard() {
230            self.executor.spawn_detached(FutureWithGuard::new(guard, task));
231        }
232    }
233
234    /// Set the current profile recorder, or set to None to not record.
235    pub fn set_recorder(&self, recorder: Option<Box<dyn Recorder>>) {
236        // Drop the old one outside of the lock.
237        let _old = std::mem::replace(&mut (*self.recorder.lock()), recorder);
238    }
239
240    /// Borrow the profile recorder. Used to record file opens.
241    pub fn recorder(&self) -> MutexGuard<'_, Option<Box<dyn Recorder>>> {
242        self.recorder.lock()
243    }
244
245    /// Record a range into a profile if one is being recorded.
246    pub fn record_page_in<P: PagerBacked>(&self, node: Arc<P>, range: Range<u64>) {
247        let mut recorder_holder = self.recorder.lock();
248        if let Some(recorder) = &mut (*recorder_holder) {
249            // If the message fails to send, so will all the rest.
250            if let Err(_) = recorder.record(node, range.start) {
251                *recorder_holder = None;
252            }
253        }
254    }
255
256    /// Creates a new VMO to be used with the pager.
257    pub fn create_vmo<T: PagerBacked>(
258        &self,
259        file: Weak<T>,
260        initial_size: u64,
261        vmo_options: zx::VmoOptions,
262    ) -> Result<(zx::Vmo, PagerPacketReceiverRegistration<T>), Error> {
263        let registration = self
264            .executor
265            .register_receiver(PagerPacketReceiver { file: Mutex::new(FileHolder::Weak(file)) });
266        Ok((
267            self.pager.create_vmo(
268                vmo_options,
269                self.executor.port(),
270                registration.key(),
271                initial_size,
272            )?,
273            registration,
274        ))
275    }
276
277    /// Starts watching for the `VMO_ZERO_CHILDREN` signal on `file`'s vmo. Returns false if the
278    /// signal is already being watched for. When the pager receives the `VMO_ZERO_CHILDREN` signal
279    /// [`PagerBacked::on_zero_children`] will be called.
280    pub fn watch_for_zero_children(&self, file: &impl PagerBacked) -> Result<bool, Error> {
281        let mut file = file.pager_packet_receiver_registration().file.lock();
282
283        match &*file {
284            FileHolder::Weak(weak) => {
285                // Should never fail because watch_for_zero_children should be called from `file`.
286                let strong = weak.upgrade().unwrap();
287
288                watch_for_zero_children(strong.as_ref())?;
289
290                STRONG_FILE_REFS.fetch_add(1, Ordering::Relaxed);
291                *file = FileHolder::Strong(strong);
292                Ok(true)
293            }
294            FileHolder::Strong(_) => Ok(false),
295        }
296    }
297
298    /// Supplies pages in response to a `ZX_PAGER_VMO_READ` page request. See
299    /// `zx_pager_supply_pages` for more information.
300    fn supply_pages(
301        &self,
302        vmo: &zx::Vmo,
303        range: Range<u64>,
304        transfer_vmo: &zx::Vmo,
305        transfer_offset: u64,
306    ) {
307        if let Err(e) = self.pager.supply_pages(vmo, range, transfer_vmo, transfer_offset) {
308            error!(error:? = e; "supply_pages failed");
309        }
310    }
311
312    /// Notifies the kernel that a page request for the given `range` has failed. Sent in response
313    /// to a `ZX_PAGER_VMO_READ` or `ZX_PAGER_VMO_DIRTY` page request. See `ZX_PAGER_OP_FAIL` for
314    /// more information.
315    fn report_failure(&self, vmo: &zx::Vmo, range: Range<u64>, status: zx::Status) {
316        let pager_status = match status {
317            zx::Status::IO_DATA_INTEGRITY => zx::Status::IO_DATA_INTEGRITY,
318            zx::Status::NO_SPACE => zx::Status::NO_SPACE,
319            zx::Status::FILE_BIG => zx::Status::BUFFER_TOO_SMALL,
320            zx::Status::IO
321            | zx::Status::IO_DATA_LOSS
322            | zx::Status::IO_INVALID
323            | zx::Status::IO_MISSED_DEADLINE
324            | zx::Status::IO_NOT_PRESENT
325            | zx::Status::IO_OVERRUN
326            | zx::Status::IO_REFUSED
327            | zx::Status::PEER_CLOSED => zx::Status::IO,
328            _ => zx::Status::BAD_STATE,
329        };
330        if let Err(e) = self.pager.op_range(zx::PagerOp::Fail(pager_status), vmo, range) {
331            error!(error:? = e; "op_range failed");
332        }
333    }
334
335    /// Allows the kernel to dirty the `range` of pages. Sent in response to a `ZX_PAGER_VMO_DIRTY`
336    /// page request. See `ZX_PAGER_OP_DIRTY` for more information.
337    fn dirty_pages(&self, vmo: &zx::Vmo, range: Range<u64>) -> Result<(), zx::Status> {
338        let result = self.pager.op_range(zx::PagerOp::Dirty, vmo, range);
339        if let Err(e) = &result {
340            // It is possible for `ZX_ERR_NOT_FOUND` to be returned on a clean page that has been
341            // evicted. In this case, the  kernel will retry if necessary.See
342            // https://fxbug.dev/42086069 for more information.
343            error!(error:? = e; "dirty_pages failed");
344        }
345        return result;
346    }
347
348    /// Notifies the kernel that the filesystem has started cleaning the `range` of pages. See
349    /// `ZX_PAGER_OP_WRITEBACK_BEGIN` for more information.
350    pub fn writeback_begin(
351        &self,
352        vmo: &zx::Vmo,
353        range: Range<u64>,
354        options: zx::PagerWritebackBeginOptions,
355    ) {
356        if let Err(e) = self.pager.op_range(zx::PagerOp::WritebackBegin(options), vmo, range) {
357            error!(error:? = e; "writeback_begin failed");
358        }
359    }
360
361    /// Notifies the kernel that the filesystem has finished cleaning the `range` of pages. See
362    /// `ZX_PAGER_OP_WRITEBACK_END` for more information.
363    pub fn writeback_end(&self, vmo: &zx::Vmo, range: Range<u64>) {
364        if let Err(e) = self.pager.op_range(zx::PagerOp::WritebackEnd, vmo, range) {
365            error!(error:? = e; "writeback_end failed");
366        }
367    }
368
369    /// Queries the `vmo` for ranges that are dirty within `range`. Returns `(num_returned,
370    /// num_remaining)` where `num_returned` is the number of objects populated in `buffer` and
371    /// `num_remaining` is the number of dirty ranges remaining in `range` that could not fit in
372    /// `buffer`. See `zx_pager_query_dirty_ranges` for more information.
373    pub fn query_dirty_ranges(
374        &self,
375        vmo: &zx::Vmo,
376        range: Range<u64>,
377        buffer: &mut [VmoDirtyRange],
378    ) -> Result<(usize, usize), zx::Status> {
379        let mut actual = 0;
380        let mut avail = 0;
381        let status = unsafe {
382            // TODO(https://fxbug.dev/42142550) Move to src/lib/zircon/rust/src/pager.rs once
383            // query_dirty_ranges is part of the stable vDSO.
384            zx::sys::zx_pager_query_dirty_ranges(
385                self.pager.raw_handle(),
386                vmo.raw_handle(),
387                range.start,
388                range.end - range.start,
389                buffer.as_mut_ptr() as *mut u8,
390                std::mem::size_of_val(buffer),
391                &mut actual as *mut usize,
392                &mut avail as *mut usize,
393            )
394        };
395        zx::ok(status).map(|_| (actual, avail - actual))
396    }
397
398    /// Queries the `vmo` for any pager related statistics. If
399    /// `PagerVmoStatsOptions::RESET_VMO_STATS` is passed then the stats will also be reset. See
400    /// `zx_pager_query_vmo_stats` for more information.
401    pub fn query_vmo_stats(
402        &self,
403        vmo: &zx::Vmo,
404        options: PagerVmoStatsOptions,
405    ) -> Result<PagerVmoStats, zx::Status> {
406        #[repr(C)]
407        #[derive(Default)]
408        struct zx_pager_vmo_stats {
409            pub modified: u32,
410        }
411        const ZX_PAGER_VMO_STATS_MODIFIED: u32 = 1;
412        let mut vmo_stats = MaybeUninit::<zx_pager_vmo_stats>::uninit();
413        let status = unsafe {
414            // TODO(https://fxbug.dev/42142550) Move to src/lib/zircon/rust/src/pager.rs once
415            // query_vmo_stats is part of the stable vDSO.
416            zx::sys::zx_pager_query_vmo_stats(
417                self.pager.raw_handle(),
418                vmo.raw_handle(),
419                options.bits(),
420                vmo_stats.as_mut_ptr() as *mut u8,
421                std::mem::size_of::<zx_pager_vmo_stats>(),
422            )
423        };
424        zx::ok(status)?;
425        let vmo_stats = unsafe { vmo_stats.assume_init() };
426        Ok(PagerVmoStats { was_vmo_modified: vmo_stats.modified == ZX_PAGER_VMO_STATS_MODIFIED })
427    }
428
429    pub async fn page_in_barrier() {
430        Epoch::global().barrier().await;
431    }
432}
433
/// This is a trait for objects (files/blobs) that expose a pager backed VMO.
pub trait PagerBacked: FxNode + Sync + Send + Sized + 'static {
    /// The pager backing this VMO.
    fn pager(&self) -> &Pager;

    /// The receiver registration returned from [`Pager::create_vmo`].
    fn pager_packet_receiver_registration(&self) -> &PagerPacketReceiverRegistration<Self>;

    /// The pager backed VMO that this object is handling packets for. The VMO must be created with
    /// [`Pager::create_vmo`].
    fn vmo(&self) -> &zx::Vmo;

    /// Called by the pager when a `ZX_PAGER_VMO_READ` packet is received for the VMO. The
    /// implementation must respond by calling either `PageInRange::supply_pages` or
    /// `PageInRange::report_failure`.
    fn page_in(self: Arc<Self>, range: PageInRange<Self>);

    /// Called by the pager when a `ZX_PAGER_VMO_DIRTY` packet is received for the VMO. The
    /// implementation must respond by calling either `MarkDirtyRange::dirty_pages` or
    /// `MarkDirtyRange::report_failure`.
    fn mark_dirty(self: Arc<Self>, range: MarkDirtyRange<Self>);

    /// Called by the pager to indicate there are no more VMO children.
    fn on_zero_children(self: Arc<Self>);

    /// Total bytes readable. Any reads over this will be zero padded in the VMO.
    fn byte_size(&self) -> u64;

    /// Reads one or more blocks into a buffer and returns it. This method is called by
    /// `default_page_in` and `aligned_byte_range` will always be aligned to the `read_ahead_size`
    /// passed to `default_page_in` unless that would extend beyond `self.byte_size()`, in which
    /// case, `aligned_byte_range` will end at `self.byte_size()`'s next page multiple. The returned
    /// buffer must be at least as large as the requested range. Only the requested range will be
    /// supplied to the pager.
    fn aligned_read(
        &self,
        aligned_byte_range: std::ops::Range<u64>,
    ) -> impl Future<Output = Result<buffer::Buffer<'_>, Error>> + Send;
}
473
/// A generic page_in implementation that supplies pages using block-aligned reads.
///
/// The requested `pager_range` is satisfied in two parts: anything at or beyond the page-aligned
/// file size is supplied from a shared zero VMO, and the remainder is expanded to
/// `read_ahead_size` alignment and read via [`PagerBacked::aligned_read`] in spawned tasks.
pub fn default_page_in<P: PagerBacked>(
    this: Arc<P>,
    pager_range: PageInRange<P>,
    read_ahead_size: u64,
) {
    fxfs_trace::duration!(
        "start-page-in",
        "offset" => pager_range.start(),
        "len" => pager_range.len()
    );

    // A shared 1 MiB VMO of zeroes used to supply ranges past the end of the file.
    const ZERO_VMO_SIZE: u64 = 1_048_576;
    static ZERO_VMO: std::sync::LazyLock<zx::Vmo> =
        std::sync::LazyLock::new(|| zx::Vmo::create(ZERO_VMO_SIZE).unwrap());

    assert!(pager_range.end() < i64::MAX as u64);

    // Two important subtleties to consider in this space:
    //
    // `byte_size` is the official size of the object. VMOs are page-aligned so `page_aligned_size`
    // is the "official" page length of the object. This may be smaller than Vmo::get_size because
    // these two things are not updated atomically. The reverse is not true -- We do not currently
    // ever shrink a VMO's size. We also do not update byte_size (self.handle.get_size()) if an
    // independent handle is used to grow a file. This means the VMO's size should always be
    // strictly equal or bigger than `byte_size`.
    //
    // It is valid to supply more pages than asked, but supplying pages outside of the VMO range
    // will trigger OUT_OF_RANGE errors and the call will fail without supplying anything. We must
    // supply the range requested under all circumstances to unblock any page misses but we should
    // take care to never supply additional pages beyond `page_aligned_size` as there is a chance
    // that we might serve a range outside of the VMO and fail to supply anything at all.

    let page_aligned_size = round_up(this.byte_size(), page_size()).unwrap();

    // Zero-pad the tail if the requested range exceeds the size of the thing we're reading. This
    // can happen when we truncate and there are outstanding pager requests that the kernel was not
    // able to cancel in time.
    let (read_range, zero_range) = pager_range.split(page_aligned_size);
    if let Some(zero_range) = zero_range {
        for range in zero_range.chunks(ZERO_VMO_SIZE) {
            range.supply_pages(&ZERO_VMO, 0);
        }
    }

    if let Some(read_range) = read_range {
        // Expand the read to `read_ahead_size` alignment, clamped so we never supply pages beyond
        // `page_aligned_size` (see the comment above).
        let expanded_range_for_readahead = round_down(read_range.start(), read_ahead_size)
            ..std::cmp::min(
                round_up(read_range.end(), read_ahead_size).unwrap(),
                page_aligned_size,
            );
        let read_range = read_range.expand(expanded_range_for_readahead);
        for range in read_range.chunks(read_ahead_size) {
            // Record the page in before spawning the task to handle the page-in. This is necessary
            // so that we don't miss this page-in when replaying and recording a new profile.  The
            // replay is considered finished once we've responded to the page request, so if we if
            // we spawn the page request before recording the page-in, it's possible (albeit
            // unlikely) that the profiler can think the replay has finished, but not know about the
            // page request and so the next recording to be missing the page request.  With the
            // order swapped, the `test_profile` test would have a rare flake.
            this.pager().record_page_in(this.clone(), range.range.clone());

            this.pager().spawn(page_in_chunk(this.clone(), range));
        }
    }
}
540
// Reads `read_range` via `aligned_read` and supplies the resulting pages, reporting a failure to
// the kernel if the read errors. Spawned once per read-ahead chunk by `default_page_in`.
#[fxfs_trace::trace("offset" => read_range.start(), "len" => read_range.len())]
async fn page_in_chunk<P: PagerBacked>(this: Arc<P>, read_range: PageInRange<P>) {
    let buffer = match this.aligned_read(read_range.range()).await {
        Ok(v) => v,
        Err(error) => {
            error!(range:? = read_range.range(), error:?; "Failed to load range");
            read_range.report_failure(map_to_status(error));
            return;
        }
    };
    // `aligned_read` is contractually required to return a buffer covering the requested range;
    // a short buffer would supply the wrong pages, so treat it as a bug.
    assert!(
        buffer.len() as u64 >= read_range.len(),
        "A buffer smaller than requested was returned. requested: {}, returned: {}",
        read_range.len(),
        buffer.len()
    );
    read_range.supply_pages(buffer.allocator().buffer_source().vmo(), buffer.range().start as u64);
}
559
/// Represents a dirty range of page aligned bytes within a pager backed VMO.
///
/// `#[repr(C)]` so it can be filled in directly by `zx_pager_query_dirty_ranges`.
#[repr(C)]
#[derive(Debug, Copy, Clone, Default, PartialEq, Eq)]
pub struct VmoDirtyRange {
    // Byte offset of the start of the range.
    offset: u64,
    // Length of the range in bytes.
    length: u64,
    // Flag bits; see `is_zero_range`.
    options: u64,
}
568
569impl VmoDirtyRange {
570    /// The page aligned byte range.
571    pub fn range(&self) -> Range<u64> {
572        self.offset..(self.offset + self.length)
573    }
574
575    /// Returns true if all of the bytes in the range are 0.
576    pub fn is_zero_range(&self) -> bool {
577        self.options & zx::sys::ZX_VMO_DIRTY_RANGE_IS_ZERO != 0
578    }
579}
580
bitflags! {
    /// Options for `Pager::query_vmo_stats`.
    #[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
    #[repr(transparent)]
    pub struct PagerVmoStatsOptions: u32 {
        /// Resets the stats at the end of the `Pager::query_vmo_stats` call.
        const RESET_VMO_STATS = 1;
    }
}
590
/// Pager related statistics for a VMO.
#[derive(Debug)]
pub struct PagerVmoStats {
    // Set from the kernel's `modified` field in `zx_pager_query_vmo_stats`.
    was_vmo_modified: bool,
}
596
impl PagerVmoStats {
    /// Returns true if the VMO was modified since the last time the VMO stats were reset.
    pub fn was_vmo_modified(&self) -> bool {
        self.was_vmo_modified
    }
}
603
604#[inline]
605fn page_size() -> u64 {
606    zx::system_get_page_size().into()
607}
608
/// A trait for specializing `PagerRange` for different request types.
pub trait PagerRequestType {
    /// Returns the name of the request type for logging purposes.
    fn request_type_name() -> &'static str;
}
614
/// A request generated from a ZX_PAGER_VMO_READ packet.
pub struct PageInRequest;

impl PagerRequestType for PageInRequest {
    fn request_type_name() -> &'static str {
        "PageInRequest"
    }
}
623
/// The requested range from a ZX_PAGER_VMO_READ packet. This object must not be dropped without
/// calling either `supply_pages` or `report_failure`.
pub type PageInRange<T> = PagerRange<T, PageInRequest>;
627
impl<T: PagerBacked> PageInRange<T> {
    /// Constructs a new `PageInRange<T>`. `range` must be page aligned.
    ///
    /// `epoch_guard` is held for the life of the request so that `Pager::page_in_barrier` can wait
    /// for all outstanding page-ins to complete.
    pub fn new(range: Range<u64>, file: Arc<T>, epoch_guard: EpochGuard<'static>) -> Self {
        debug_assert!(
            range.start % page_size() == 0 && range.end % page_size() == 0,
            "{:?} is not page aligned",
            range
        );
        Self {
            range,
            inner: Some(PagerRangeInner { file, _epoch_guard: Some(epoch_guard) }),
            _request_type: PhantomData,
        }
    }

    /// Supplies pages to the kernel for this range. See `zx_pager_supply_pages` for more
    /// information.
    pub fn supply_pages(mut self, transfer_vmo: &zx::Vmo, transfer_offset: u64) {
        // Taking `inner` marks the request as responded to; the epoch guard inside it is dropped
        // when this function returns.
        let inner = self.inner.take().unwrap();
        inner.file.pager().supply_pages(
            inner.file.vmo(),
            self.range.clone(),
            transfer_vmo,
            transfer_offset,
        );
    }
}
655
/// A request generated from a ZX_PAGER_VMO_DIRTY packet.
#[derive(Debug)]
pub struct MarkDirtyRequest;

impl PagerRequestType for MarkDirtyRequest {
    fn request_type_name() -> &'static str {
        "MarkDirtyRequest"
    }
}
665
/// The requested range from a ZX_PAGER_VMO_DIRTY packet. This object must not be dropped without
/// calling either `mark_dirty` or `report_failure`.
pub type MarkDirtyRange<T> = PagerRange<T, MarkDirtyRequest>;
669
670impl<T: PagerBacked> MarkDirtyRange<T> {
671    /// Constructs a new `MarkDirtyRange<T>`. `range` must be page aligned.
672    pub fn new(range: Range<u64>, file: Arc<T>) -> Self {
673        debug_assert!(
674            range.start % page_size() == 0 && range.end % page_size() == 0,
675            "{:?} is not page aligned",
676            range
677        );
678        Self {
679            range,
680            inner: Some(PagerRangeInner { file, _epoch_guard: None }),
681            _request_type: PhantomData,
682        }
683    }
684
685    /// Allows the kernel to dirty this range of pages. See `ZX_PAGER_OP_DIRTY` for more
686    /// information.
687    pub fn dirty_pages(mut self) -> Result<(), zx::Status> {
688        let inner = self.inner.take().unwrap();
689        inner.file.pager().dirty_pages(inner.file.vmo(), self.range.clone())
690    }
691}
692
693#[derive(Clone)]
694struct PagerRangeInner<T: std::clone::Clone + Deref<Target: PagerBacked>> {
695    // All generic types in the template must be cloneable to derive Clone, so we template the Arc
696    // instead of the inner type.
697    file: T,
698
699    /// Holds a reference to the current Epoch, so that in-flight read requests can be tracked. This
700    /// should be None for MarkDirty requests.
701    _epoch_guard: Option<EpochGuard<'static>>,
702}
703
/// The requested range from a pager packet. This object ensures that all pager requests receive a
/// response.
pub struct PagerRange<T: PagerBacked, U: PagerRequestType> {
    // The page-aligned byte range of the request.
    range: Range<u64>,

    /// Contains the file and the ref guard. If this is None, then the request is complete.
    inner: Option<PagerRangeInner<Arc<T>>>,

    // Zero-sized marker distinguishing page-in requests from mark-dirty requests.
    _request_type: PhantomData<U>,
}
714
715impl<T: PagerBacked, U: PagerRequestType> PagerRange<T, U> {
    /// Splits the underlying range allowing for different parts of the range to be handled and
    /// responded to independently. See `RangeExt::split` for how splitting a range works.
    /// `split_point` must be page aligned.
    pub fn split(mut self, split_point: u64) -> (Option<Self>, Option<Self>) {
        // Take `inner` out so it can be cloned into the right half and moved into the left half;
        // each half then carries its own file reference (and epoch guard, for page-ins) so it can
        // respond to the kernel independently.
        let inner = self.inner.take().unwrap();
        let (left, right) = self.range.clone().split(split_point);
        let right = right.map(|range| Self {
            range,
            inner: Some(inner.clone()),
            _request_type: PhantomData,
        });
        let left = left.map(|range| Self { range, inner: Some(inner), _request_type: PhantomData });
        (left, right)
    }
730
731    /// Increases the size of the range that will be responded to. Panics if the current range is
732    /// not a subset of `new_range`. `new_range` must be page aligned.
733    pub fn expand(mut self, new_range: Range<u64>) -> Self {
734        assert!(
735            self.range.start >= new_range.start && self.range.end <= new_range.end,
736            "{:?} is not a subset of {:?}",
737            self.range,
738            new_range
739        );
740        debug_assert!(
741            new_range.start % page_size() == 0 && new_range.end % page_size() == 0,
742            "{:?} is not page aligned",
743            new_range
744        );
745        self.range = new_range;
746        self
747    }
748
749    /// Returns an iterator that splits the range into ranges of `chunk_size`. If the length of the
750    /// range is not a multiple of `chunk_size` then the last chunk won't be of length `chunk_size`.
751    /// The returned iterator will panic if it's dropped without being fully consumed. `chunk_size`
752    /// must a multiple of the page size.
753    pub fn chunks(mut self, chunk_size: u64) -> PagerRangeChunksIter<T, U> {
754        debug_assert!(
755            chunk_size % page_size() == 0,
756            "{} is not a multiple of the page size",
757            chunk_size
758        );
759        PagerRangeChunksIter {
760            start: self.range.start,
761            end: self.range.end,
762            chunk_size: chunk_size,
763            inner: self.inner.take(),
764            _request_type: PhantomData,
765        }
766    }
767
768    #[inline]
769    pub fn start(&self) -> u64 {
770        self.range.start
771    }
772
773    #[inline]
774    pub fn end(&self) -> u64 {
775        self.range.end
776    }
777
778    #[inline]
779    pub fn len(&self) -> u64 {
780        self.range.end - self.range.start
781    }
782
783    #[inline]
784    pub fn range(&self) -> Range<u64> {
785        self.range.clone()
786    }
787
788    /// Notifies the kernel that the page request for this range has failed. See `ZX_PAGER_OP_FAIL`
789    /// for more information.
790    pub fn report_failure(mut self, status: zx::Status) {
791        let inner = self.inner.take().unwrap();
792        inner.file.pager().report_failure(inner.file.vmo(), self.range.clone(), status);
793    }
794
795    /// Test only method that will consume the PagerRange without having the send a response.
796    #[cfg(test)]
797    fn consume(mut self) {
798        self.inner.take().unwrap();
799    }
800}
801
// Safety net: a `PagerRange` that still holds `inner` was never responded to. Debug builds treat
// this as a bug and panic; release builds log an error and report `BAD_STATE` so the kernel still
// receives a response for the range.
impl<T: PagerBacked, U: PagerRequestType> Drop for PagerRange<T, U> {
    fn drop(&mut self) {
        if let Some(inner) = &self.inner {
            let request_type = U::request_type_name();
            let range = self.range.clone();
            let key = inner.file.pager_packet_receiver_registration().key();
            if cfg!(debug_assertions) {
                // If this object is being dropped as part of a panic then avoid panicking again.
                // Dropping pager packets when fxfs is crashing is acceptable. Panicking again would
                // only clutter the logs.
                if !std::thread::panicking() {
                    panic!(
                        "PagerRange was dropped without sending a response, \
                        request_type={request_type}, range={range:?}, key={key}",
                    );
                }
            } else {
                error!(
                    "PagerRange was dropped without sending a response, \
                    request_type={request_type}, range={range:?}, key={key}",
                );
                // Fail the request so the kernel gets a definitive answer for this range.
                inner.file.pager().report_failure(inner.file.vmo(), range, zx::Status::BAD_STATE);
            }
        }
    }
}
828
/// An iterator similar to `std::slice::Chunks` which yields `PagerRange` objects.
/// `PagerRangeChunksIter` will panic if it's dropped without being fully consumed.
pub struct PagerRangeChunksIter<T: PagerBacked, U: PagerRequestType> {
    /// Start of the not-yet-yielded portion of the range; advances as chunks are produced.
    start: u64,
    /// End of the overall range.
    end: u64,
    /// Length of each yielded chunk (the final chunk may be shorter).
    chunk_size: u64,
    /// The file and locks/references that need to survive the request.
    inner: Option<PagerRangeInner<Arc<T>>>,
    /// Marker carrying the request kind through to the yielded `PagerRange`s.
    _request_type: PhantomData<U>,
}
839
840impl<T: PagerBacked, U: PagerRequestType> Iterator for PagerRangeChunksIter<T, U> {
841    type Item = PagerRange<T, U>;
842    fn next(&mut self) -> Option<Self::Item> {
843        if self.start == self.end {
844            None
845        } else if self.start + self.chunk_size >= self.end {
846            let next = Self::Item {
847                range: self.start..self.end,
848                inner: self.inner.take(),
849                _request_type: PhantomData,
850            };
851            self.start = self.end;
852            Some(next)
853        } else {
854            let next_end = self.start + self.chunk_size;
855            let next = Self::Item {
856                range: self.start..next_end,
857                inner: self.inner.clone(),
858                _request_type: PhantomData,
859            };
860            self.start = next_end;
861            Some(next)
862        }
863    }
864}
865
// Mirrors `Drop for PagerRange`: any un-yielded remainder of the range was never responded to.
// Debug builds panic; release builds log an error and report `BAD_STATE` for the remaining range
// so the kernel still receives a response.
impl<T: PagerBacked, U: PagerRequestType> Drop for PagerRangeChunksIter<T, U> {
    fn drop(&mut self) {
        if self.start != self.end {
            let request_type = U::request_type_name();
            let remaining = self.start..self.end;
            // `inner` is still Some here because only the final chunk takes it.
            let inner = self.inner.take().unwrap();
            let key = inner.file.pager_packet_receiver_registration().key();
            if cfg!(debug_assertions) {
                // If this object is being dropped as part of a panic then avoid panicking again.
                // Dropping pager packets when fxfs is crashing is acceptable. Panicking again would
                // only clutter the logs.
                if !std::thread::panicking() {
                    panic!(
                        "PagerRangeChunksIter was dropped without being fully consumed, \
                    request_type={request_type}, remaining={remaining:?}, key={key}",
                    );
                }
            } else {
                error!(
                    "PagerRangeChunksIter was dropped without being fully consumed, \
                    request_type={request_type}, remaining={remaining:?}, key={key}",
                );
                inner.file.pager().report_failure(
                    inner.file.vmo(),
                    remaining,
                    zx::Status::BAD_STATE,
                );
            }
        }
    }
}
897
898#[cfg(test)]
899mod tests {
900    use super::*;
901    use futures::StreamExt;
902    use futures::channel::mpsc;
903    use fxfs_macros::ToWeakNode;
904
    /// A pager request observed by `MockFile`, recorded so tests can compare the requests actually
    /// received against expectations.
    #[derive(Clone, Debug, PartialEq, Eq)]
    enum PagerRequest {
        /// A page-in (read fault) request for the given byte range.
        PageIn(Range<u64>),
        /// A mark-dirty (write fault) request for the given byte range.
        Dirty(Range<u64>),
    }
910
    /// A minimal pager-backed file for tests: it supplies scratch pages on page-in, approves all
    /// dirty requests, and logs every request it receives.
    #[derive(ToWeakNode)]
    struct MockFile {
        vmo: zx::Vmo,
        pager_packet_receiver_registration: PagerPacketReceiverRegistration<Self>,
        pager: Arc<Pager>,
        /// Page-in and mark-dirty requests get logged so we can compare actual calls to
        /// expectations.
        pager_requests: Mutex<Vec<PagerRequest>>,
    }
919
920    impl MockFile {
921        fn new(pager: Arc<Pager>) -> Arc<Self> {
922            Self::new_with_size_and_type(pager, page_size(), zx::VmoOptions::UNBOUNDED)
923        }
924
925        fn new_with_size_and_type(
926            pager: Arc<Pager>,
927            size: u64,
928            vmo_type: zx::VmoOptions,
929        ) -> Arc<Self> {
930            Arc::new_cyclic(|weak| {
931                let (vmo, pager_packet_receiver_registration) = pager
932                    .create_vmo(weak.clone(), size, vmo_type | zx::VmoOptions::TRAP_DIRTY)
933                    .unwrap();
934                Self {
935                    pager,
936                    vmo,
937                    pager_packet_receiver_registration,
938                    pager_requests: Default::default(),
939                }
940            })
941        }
942
943        // Returns the page_in requests received for this file.
944        fn pager_requests(&self, reset: bool) -> Vec<PagerRequest> {
945            if reset {
946                std::mem::take(&mut *self.pager_requests.lock())
947            } else {
948                self.pager_requests.lock().clone()
949            }
950        }
951    }
952
    // `FxNode` stub: none of these methods are exercised by the pager tests, so they are all left
    // unimplemented.
    impl FxNode for MockFile {
        fn object_id(&self) -> u64 {
            unimplemented!();
        }

        fn parent(&self) -> Option<Arc<crate::directory::FxDirectory>> {
            unimplemented!();
        }

        fn set_parent(&self, _parent: Arc<crate::directory::FxDirectory>) {
            unimplemented!();
        }

        fn open_count_add_one(&self) {
            unimplemented!();
        }

        fn open_count_sub_one(self: Arc<Self>) {
            unimplemented!();
        }

        fn object_descriptor(&self) -> fxfs::object_store::ObjectDescriptor {
            unimplemented!();
        }
    }
978
    impl PagerBacked for MockFile {
        fn pager(&self) -> &Pager {
            &self.pager
        }

        fn pager_packet_receiver_registration(&self) -> &PagerPacketReceiverRegistration<Self> {
            &self.pager_packet_receiver_registration
        }

        fn vmo(&self) -> &zx::Vmo {
            &self.vmo
        }

        // Logs the request and satisfies it with pages from a freshly created scratch VMO.
        fn page_in(self: Arc<Self>, range: PageInRange<Self>) {
            let aux_vmo = zx::Vmo::create(range.len()).unwrap();
            self.pager_requests.lock().push(PagerRequest::PageIn(range.range()));
            range.supply_pages(&aux_vmo, 0);
        }

        // Logs the request and approves the dirty transition.
        fn mark_dirty(self: Arc<Self>, range: MarkDirtyRange<Self>) {
            self.pager_requests.lock().push(PagerRequest::Dirty(range.range()));
            let _ = range.dirty_pages();
        }

        fn on_zero_children(self: Arc<Self>) {}

        fn byte_size(&self) -> u64 {
            unimplemented!();
        }
        // Not used: `page_in` above supplies pages directly instead of reading from storage.
        async fn aligned_read(
            &self,
            _aligned_byte_range: std::ops::Range<u64>,
        ) -> Result<buffer::Buffer<'_>, Error> {
            unimplemented!();
        }
    }
1015
    /// Test file that forwards `on_zero_children` notifications over a channel so tests can await
    /// them.
    #[derive(ToWeakNode)]
    struct OnZeroChildrenFile {
        pager: Arc<Pager>,
        vmo: zx::Vmo,
        pager_packet_receiver_registration: PagerPacketReceiverRegistration<Self>,
        /// A message is sent here each time `on_zero_children` fires.
        sender: Mutex<mpsc::UnboundedSender<()>>,
    }
1023
    impl OnZeroChildrenFile {
        /// Creates a one-page file that reports zero-children events through `sender`.
        fn new(pager: Arc<Pager>, sender: mpsc::UnboundedSender<()>) -> Arc<Self> {
            Arc::new_cyclic(|weak| {
                let (vmo, pager_packet_receiver_registration) =
                    pager.create_vmo(weak.clone(), page_size(), zx::VmoOptions::empty()).unwrap();
                Self { pager, vmo, pager_packet_receiver_registration, sender: Mutex::new(sender) }
            })
        }
    }
1033
    // `FxNode` stub: the zero-children tests never call these methods.
    impl FxNode for OnZeroChildrenFile {
        fn object_id(&self) -> u64 {
            unimplemented!();
        }

        fn parent(&self) -> Option<Arc<crate::directory::FxDirectory>> {
            unimplemented!();
        }

        fn set_parent(&self, _parent: Arc<crate::directory::FxDirectory>) {
            unimplemented!();
        }

        fn open_count_add_one(&self) {
            unimplemented!();
        }

        fn open_count_sub_one(self: Arc<Self>) {
            unimplemented!();
        }

        fn object_descriptor(&self) -> fxfs::object_store::ObjectDescriptor {
            unimplemented!();
        }
    }
1059
    impl PagerBacked for OnZeroChildrenFile {
        fn pager(&self) -> &Pager {
            &self.pager
        }

        fn pager_packet_receiver_registration(&self) -> &PagerPacketReceiverRegistration<Self> {
            &self.pager_packet_receiver_registration
        }

        fn vmo(&self) -> &zx::Vmo {
            &self.vmo
        }

        // The zero-children tests never read or write pages, so no fault should ever arrive.
        fn page_in(self: Arc<Self>, _range: PageInRange<Self>) {
            unreachable!();
        }

        fn mark_dirty(self: Arc<Self>, _range: MarkDirtyRange<Self>) {
            unreachable!();
        }

        // Forward the notification to the waiting test.
        fn on_zero_children(self: Arc<Self>) {
            self.sender.lock().unbounded_send(()).unwrap();
        }
        fn byte_size(&self) -> u64 {
            unreachable!();
        }
        async fn aligned_read(
            &self,
            _aligned_byte_range: std::ops::Range<u64>,
        ) -> Result<buffer::Buffer<'_>, Error> {
            unreachable!();
        }
    }
1094
    /// Creating and then dropping a child VMO while watching should produce exactly one
    /// `on_zero_children` notification.
    #[fuchsia::test(threads = 2)]
    async fn test_watch_for_zero_children() {
        let (sender, mut receiver) = mpsc::unbounded();
        let scope = ExecutionScope::new();
        let pager = Arc::new(Pager::new(scope.clone()).unwrap());
        let file = OnZeroChildrenFile::new(pager.clone(), sender);
        {
            // The child VMO is dropped at the end of this scope, which should trigger the
            // `VMO_ZERO_CHILDREN` signal.
            let _child_vmo = file
                .vmo()
                .create_child(
                    zx::VmoChildOptions::SNAPSHOT_AT_LEAST_ON_WRITE,
                    0,
                    file.vmo().get_content_size().unwrap(),
                )
                .unwrap();
            assert!(pager.watch_for_zero_children(file.as_ref()).unwrap());
        }
        // Wait for `on_zero_children` to be called.
        receiver.next().await.unwrap();

        scope.wait().await;
    }
1117
    /// `watch_for_zero_children` returns whether a new watch was installed: false while already
    /// watching, and true again after the signal fires (watching stops on delivery).
    #[fuchsia::test(threads = 2)]
    async fn test_multiple_watch_for_zero_children_calls() {
        let (sender, mut receiver) = mpsc::unbounded();
        let scope = ExecutionScope::new();
        let pager = Arc::new(Pager::new(scope.clone()).unwrap());
        let file = OnZeroChildrenFile::new(pager.clone(), sender);
        {
            let _child_vmo = file
                .vmo()
                .create_child(
                    zx::VmoChildOptions::SNAPSHOT_AT_LEAST_ON_WRITE,
                    0,
                    file.vmo().get_content_size().unwrap(),
                )
                .unwrap();
            assert!(pager.watch_for_zero_children(file.as_ref()).unwrap());
            // `watch_for_zero_children` will return false when it's already watching.
            assert!(!pager.watch_for_zero_children(file.as_ref()).unwrap());
        }
        receiver.next().await.unwrap();

        // The pager stops listening for VMO_ZERO_CHILDREN once the signal fires. Calling
        // `watch_for_zero_children` afterwards should return true again because watching had
        // stopped.
        assert!(pager.watch_for_zero_children(file.as_ref()).unwrap());

        file.pager_packet_receiver_registration.stop_watching_for_zero_children();

        scope.wait().await;
    }
1148
    /// Pins how failure statuses passed to `report_failure` surface to a `zx_vmo_read` caller:
    /// some statuses pass through unchanged while others are translated (the assertion pairs at
    /// the bottom document the mapping).
    #[fuchsia::test(threads = 2)]
    async fn test_status_code_mapping() {
        /// A file whose `page_in` always fails with the currently configured status code.
        #[derive(ToWeakNode)]
        struct StatusCodeFile {
            vmo: zx::Vmo,
            pager: Arc<Pager>,
            /// The status that the next page-in request will be failed with.
            status_code: Mutex<zx::Status>,
            pager_packet_receiver_registration: PagerPacketReceiverRegistration<Self>,
        }

        // `FxNode` stub: not exercised by this test.
        impl FxNode for StatusCodeFile {
            fn object_id(&self) -> u64 {
                unimplemented!();
            }

            fn parent(&self) -> Option<Arc<crate::directory::FxDirectory>> {
                unimplemented!();
            }

            fn set_parent(&self, _parent: Arc<crate::directory::FxDirectory>) {
                unimplemented!();
            }

            fn open_count_add_one(&self) {
                unimplemented!();
            }

            fn open_count_sub_one(self: Arc<Self>) {
                unimplemented!();
            }

            fn object_descriptor(&self) -> fxfs::object_store::ObjectDescriptor {
                unimplemented!();
            }
        }

        impl PagerBacked for StatusCodeFile {
            fn pager(&self) -> &Pager {
                &self.pager
            }

            fn pager_packet_receiver_registration(&self) -> &PagerPacketReceiverRegistration<Self> {
                &self.pager_packet_receiver_registration
            }

            fn vmo(&self) -> &zx::Vmo {
                &self.vmo
            }

            // Every page-in fails with whatever status the test has configured.
            fn page_in(self: Arc<Self>, range: PageInRange<Self>) {
                range.report_failure(*self.status_code.lock());
            }

            fn mark_dirty(self: Arc<Self>, _range: MarkDirtyRange<Self>) {
                unreachable!();
            }

            fn on_zero_children(self: Arc<Self>) {
                unreachable!();
            }

            fn byte_size(&self) -> u64 {
                unreachable!();
            }
            async fn aligned_read(
                &self,
                _aligned_byte_range: std::ops::Range<u64>,
            ) -> Result<buffer::Buffer<'_>, Error> {
                unreachable!();
            }
        }

        let scope = ExecutionScope::new();
        let pager = Arc::new(Pager::new(scope.clone()).unwrap());
        let file = Arc::new_cyclic(|weak| {
            let (vmo, pager_packet_receiver_registration) =
                pager.create_vmo(weak.clone(), page_size(), zx::VmoOptions::empty()).unwrap();
            StatusCodeFile {
                vmo,
                pager: pager.clone(),
                status_code: Mutex::new(zx::Status::INTERNAL),
                pager_packet_receiver_registration,
            }
        });

        // Configures the failure status, then reads the VMO (triggering `page_in`) and checks the
        // status the reader observes.
        fn check_mapping(
            file: &StatusCodeFile,
            failure_code: zx::Status,
            expected_code: zx::Status,
        ) {
            {
                // Scoped so the lock guard is released before the read below faults and `page_in`
                // takes the same lock.
                *file.status_code.lock() = failure_code;
            }
            let mut buf = [0u8; 8];
            assert_eq!(file.vmo().read(&mut buf, 0).unwrap_err(), expected_code);
        }
        check_mapping(&file, zx::Status::IO_DATA_INTEGRITY, zx::Status::IO_DATA_INTEGRITY);
        check_mapping(&file, zx::Status::NO_SPACE, zx::Status::NO_SPACE);
        check_mapping(&file, zx::Status::FILE_BIG, zx::Status::BUFFER_TOO_SMALL);
        check_mapping(&file, zx::Status::IO, zx::Status::IO);
        check_mapping(&file, zx::Status::IO_DATA_LOSS, zx::Status::IO);
        check_mapping(&file, zx::Status::NOT_EMPTY, zx::Status::BAD_STATE);
        check_mapping(&file, zx::Status::BAD_STATE, zx::Status::BAD_STATE);

        scope.wait().await;
    }
1255
    /// `query_vmo_stats` should report the VMO as modified only after a write, and the modified
    /// flag should only clear once `RESET_VMO_STATS` is passed.
    #[fuchsia::test(threads = 2)]
    async fn test_query_vmo_stats() {
        let scope = ExecutionScope::new();
        let pager = Arc::new(Pager::new(scope.clone()).unwrap());
        let file = MockFile::new(pager.clone());

        let stats = pager.query_vmo_stats(file.vmo(), PagerVmoStatsOptions::empty()).unwrap();
        // The VMO hasn't been modified yet.
        assert!(!stats.was_vmo_modified());

        file.vmo().write(&[0, 1, 2, 3, 4], 0).unwrap();
        let stats = pager.query_vmo_stats(file.vmo(), PagerVmoStatsOptions::empty()).unwrap();
        assert!(stats.was_vmo_modified());

        // Reset the stats this time.
        let stats =
            pager.query_vmo_stats(file.vmo(), PagerVmoStatsOptions::RESET_VMO_STATS).unwrap();
        // The stats weren't reset last time so the stats are still showing that the vmo is modified.
        assert!(stats.was_vmo_modified());

        // After the reset above, the modified flag should now be clear.
        let stats = pager.query_vmo_stats(file.vmo(), PagerVmoStatsOptions::empty()).unwrap();
        assert!(!stats.was_vmo_modified());

        scope.wait().await;
    }
1281
    /// Exercises `query_dirty_ranges`: grows an unbounded VMO, writes scattered pages, and checks
    /// that dirty and zero ranges (plus the `remaining` count) are reported as expected.
    #[fuchsia::test(threads = 2)]
    async fn test_query_dirty_ranges() {
        // Some notes on our paging implementation:
        //  * Fxfs uses UNBOUNDED VMO. These are maximally sized at creation time with
        //    stream size holding the content length.
        //  * Like regular VMO, all pages are initially in an unknown state. When a page
        //    is first accessed, the pager (Fxfs) will be asked to page in content.
        //  * Size can be set as a property, via set_content_size or via set_stream_size
        //    but only set_stream_size() should ever be used. This ensures that the tail
        //    is correctly zeroed.
        let scope = ExecutionScope::new();
        let pager = Arc::new(Pager::new(scope.clone()).unwrap());
        let file = MockFile::new_with_size_and_type(
            pager.clone(),
            page_size() + page_size() / 2,
            zx::VmoOptions::UNBOUNDED,
        );
        // Deliberately small so `remaining` is exercised further down.
        let mut buffer = vec![VmoDirtyRange::default(); 2];

        let page_size = page_size();
        assert_eq!(file.vmo().get_content_size().unwrap(), page_size + page_size / 2);

        // Nothing has been dirtied yet.
        let (actual, remaining) =
            pager.query_dirty_ranges(file.vmo(), 0..page_size * 100, &mut buffer).unwrap();
        assert_eq!(actual, 0);
        assert_eq!(remaining, 0);

        // Grow the VMO content size from 1.5 pages to 7.5 pages.
        file.vmo().set_stream_size(page_size * 7 + page_size / 2).unwrap();

        let (actual, remaining) =
            pager.query_dirty_ranges(file.vmo(), 0..page_size * 100, &mut buffer).unwrap();
        assert_eq!(actual, 2);
        assert_eq!(remaining, 0);
        // Second page must be assumed to contain data so tail is zeroed.
        assert_eq!(buffer[0].range(), page_size..page_size * 2);
        assert!(!buffer[0].is_zero_range());
        // All pages after that are marked as zero.
        assert_eq!(buffer[1].range(), page_size * 2..page_size * 8);
        assert!(buffer[1].is_zero_range());

        // We expect the tail page to have been read as part of the zeroing when we grew the size.
        // It will then be marked dirty (modified)
        assert_eq!(
            file.pager_requests(true),
            vec![
                PagerRequest::PageIn(page_size * 1..page_size * 2),
                PagerRequest::Dirty(page_size * 1..page_size * 2),
            ]
        );

        // Modify the 2nd, 3rd, and 5th pages.
        file.vmo().write(&[1, 2, 3, 4], page_size).unwrap();
        file.vmo().write(&[1, 2, 3, 4], page_size * 2).unwrap();
        file.vmo().write(&[1, 2, 3, 4], page_size * 4).unwrap();

        // Pages are known zero because we just grew the file.
        // We don't expect any page-in requests for them.
        assert_eq!(
            file.pager_requests(true),
            vec![
                PagerRequest::Dirty(page_size * 2..page_size * 3),
                PagerRequest::Dirty(page_size * 4..page_size * 5)
            ]
        );

        let (actual, remaining) =
            pager.query_dirty_ranges(file.vmo(), 0..page_size * 7, &mut buffer).unwrap();
        assert_eq!(actual, 2);
        assert_eq!(remaining, 2);
        // Second and third pages (non-zero)
        assert_eq!(buffer[0].range(), page_size..(page_size * 3));
        assert!(!buffer[0].is_zero_range());
        // Fourth page is zero.
        assert_eq!(buffer[1].range(), (page_size * 3)..(page_size * 4));
        assert!(buffer[1].is_zero_range());

        let (actual, remaining) = pager
            .query_dirty_ranges(file.vmo(), page_size * 4..page_size * 7, &mut buffer)
            .unwrap();
        assert_eq!(actual, 2);
        assert_eq!(remaining, 0);
        // Fifth page (non-zero)
        assert_eq!(buffer[0].range(), (page_size * 4)..(page_size * 5));
        assert!(!buffer[0].is_zero_range());
        // Rest of the VMO is zero.
        assert_eq!(buffer[1].range(), (page_size * 5)..(page_size * 7));
        assert!(buffer[1].is_zero_range());

        // Read the 4th page.
        let mut read_buf = vec![0u8; page_size as usize];
        file.vmo().read(&mut read_buf, page_size * 3).expect("read");
        let expected = vec![0u8; page_size as usize];
        assert_eq!(read_buf, expected);
        // The page was a known zero range, so no page-in request should have been generated.
        assert_eq!(file.pager_requests(true), vec![]);

        scope.wait().await;
    }
1380
    #[fuchsia::test(threads = 2)]
    async fn test_zero_grown_vmo() {
        // When a VMO's content size is explicitly grown, check that new content is zeroed.
        let scope = ExecutionScope::new();
        let pager = Arc::new(Pager::new(scope.clone()).unwrap());
        let file = MockFile::new(pager.clone());

        // Fill both pages with 0xff.
        let write_buf = vec![0xff; page_size() as usize * 2];
        file.vmo().set_stream_size(page_size() * 2).expect("grow");
        file.vmo().write(&write_buf, 0).expect("write");
        let mut read_buf = vec![0u8; page_size() as usize * 2];
        // Read both pages back to confirm the write landed.
        file.vmo().read(&mut read_buf, 0).expect("read");
        assert_eq!(read_buf, write_buf);

        // The tail beyond the new content size should be zeroed.
        file.vmo().set_stream_size(page_size() + 1).expect("shrink");
        file.vmo().write(&[0xff; 3], page_size() + 2).expect("write after shrink");
        // To make sure the above content size change actually zeroed data, we grow again.
        file.vmo().set_stream_size(page_size() + 4).expect("grow again");
        let mut read_buf = vec![0u8; page_size() as usize];
        file.vmo().read(&mut read_buf, page_size()).expect("read");
        // Only the first byte (within the shrunken size) should survive; everything past the
        // shrink point must read back as zero despite the intervening write.
        let mut expected = vec![0u8; page_size() as usize];
        expected[0] = 0xff;
        assert_eq!(read_buf, expected);

        scope.wait().await;
    }
1409
1410    #[fuchsia::test]
1411    async fn test_pager_range_chunks_iter_chunks() {
1412        let scope = ExecutionScope::new();
1413        let pager = Arc::new(Pager::new(scope).unwrap());
1414        let file = MockFile::new(pager.clone());
1415
1416        let pager_range = PageInRange::new(0..page_size() * 5, file, Epoch::global().guard());
1417        let ranges: Vec<Range<u64>> = pager_range
1418            .chunks(page_size() * 2)
1419            .map(|pager_range| {
1420                let range = pager_range.range();
1421                pager_range.consume();
1422                range
1423            })
1424            .collect();
1425        assert_eq!(
1426            ranges,
1427            [
1428                0..page_size() * 2,
1429                page_size() * 2..page_size() * 4,
1430                page_size() * 4..page_size() * 5
1431            ]
1432        );
1433    }
1434
1435    #[fuchsia::test]
1436    async fn test_pager_range_split() {
1437        let scope = ExecutionScope::new();
1438        let pager = Arc::new(Pager::new(scope).unwrap());
1439        let file = MockFile::new(pager.clone());
1440
1441        let pager_range = PageInRange::new(0..page_size() * 10, file, Epoch::global().guard());
1442        let (left, right) = pager_range.split(page_size() * 5);
1443        let (left, right) = (left.unwrap(), right.unwrap());
1444        assert_eq!(left.range(), 0..page_size() * 5);
1445        assert_eq!(right.range(), page_size() * 5..page_size() * 10);
1446
1447        left.consume();
1448        right.consume();
1449    }
1450
    /// `expand` must panic when the current range is not a subset of the new range.
    #[fuchsia::test]
    #[should_panic(expected = "0..8192 is not a subset of 0..4096")]
    async fn test_pager_range_bad_expand_panics() {
        let scope = ExecutionScope::new();
        let pager = Arc::new(Pager::new(scope).unwrap());
        let file = MockFile::new(pager.clone());

        // The range covers two pages but the "expanded" range only covers one.
        let pager_range = PageInRange::new(0..page_size() * 2, file, Epoch::global().guard());
        pager_range.expand(0..page_size()).consume();
    }
1461
    /// A pager-backed file with pluggable `page_in`/`mark_dirty` handlers, letting each test
    /// script exactly how pager requests are answered.
    #[derive(ToWeakNode)]
    struct PagerRangeTestFile {
        vmo: zx::Vmo,
        pager_packet_receiver_registration: PagerPacketReceiverRegistration<Self>,
        pager: Pager,
        /// Invoked for every page-in request.
        page_in_fn: Box<dyn Fn(PageInRange<Self>) + Send + Sync + 'static>,
        /// Invoked for every mark-dirty request.
        mark_dirty_fn: Box<dyn Fn(MarkDirtyRange<Self>) + Send + Sync + 'static>,
    }
1470
    impl PagerRangeTestFile {
        /// Creates a two-page `TRAP_DIRTY` file whose pager callbacks are supplied by the test.
        fn new<
            F1: Fn(PageInRange<Self>) + Send + Sync + 'static,
            F2: Fn(MarkDirtyRange<Self>) + Send + Sync + 'static,
        >(
            page_in_fn: F1,
            mark_dirty_fn: F2,
        ) -> Arc<Self> {
            Arc::new_cyclic(|weak| {
                let pager = Pager::new(ExecutionScope::new()).unwrap();
                let (vmo, pager_packet_receiver_registration) = pager
                    .create_vmo(weak.clone(), page_size() * 2, zx::VmoOptions::TRAP_DIRTY)
                    .unwrap();
                Self {
                    vmo,
                    pager_packet_receiver_registration,
                    pager,
                    page_in_fn: Box::new(page_in_fn),
                    mark_dirty_fn: Box::new(mark_dirty_fn),
                }
            })
        }
    }
1494
    // `FxNode` stub: only `object_id` is given a value; the rest are not exercised by these tests.
    impl FxNode for PagerRangeTestFile {
        fn object_id(&self) -> u64 {
            1
        }

        fn parent(&self) -> Option<Arc<crate::directory::FxDirectory>> {
            unimplemented!()
        }

        fn set_parent(&self, _parent: Arc<crate::directory::FxDirectory>) {
            unimplemented!()
        }

        fn open_count_add_one(&self) {
            unimplemented!()
        }

        fn open_count_sub_one(self: Arc<Self>) {
            unimplemented!()
        }

        fn object_descriptor(&self) -> fxfs::object_store::ObjectDescriptor {
            unimplemented!()
        }
    }
1520
    impl PagerBacked for PagerRangeTestFile {
        fn pager(&self) -> &Pager {
            &self.pager
        }

        fn pager_packet_receiver_registration(&self) -> &PagerPacketReceiverRegistration<Self> {
            &self.pager_packet_receiver_registration
        }

        fn vmo(&self) -> &zx::Vmo {
            &self.vmo
        }

        // Delegate to the test-supplied callback.
        fn page_in(self: Arc<Self>, range: PageInRange<Self>) {
            (self.page_in_fn)(range)
        }

        // Delegate to the test-supplied callback.
        fn mark_dirty(self: Arc<Self>, range: MarkDirtyRange<Self>) {
            (self.mark_dirty_fn)(range)
        }

        fn on_zero_children(self: Arc<Self>) {}

        fn byte_size(&self) -> u64 {
            unimplemented!();
        }

        // Not used: the callbacks respond to requests directly.
        async fn aligned_read(
            &self,
            _range: std::ops::Range<u64>,
        ) -> Result<buffer::Buffer<'_>, Error> {
            unimplemented!();
        }
    }
1555
1556    fn real_supply_pages(range: PageInRange<PagerRangeTestFile>) {
1557        let aux_vmo = zx::Vmo::create(range.len()).unwrap();
1558        range.supply_pages(&aux_vmo, 0);
1559    }
1560
1561    fn real_mark_dirty(range: MarkDirtyRange<PagerRangeTestFile>) {
1562        let _ = range.dirty_pages();
1563    }
1564
1565    #[fuchsia::test(threads = 2)]
1566    async fn test_page_in_range_supply_pages() {
1567        let file = PagerRangeTestFile::new(real_supply_pages, real_mark_dirty);
1568
1569        let mut data = vec![0; 20];
1570        file.vmo.read(&mut data, 0).unwrap();
1571    }
1572
1573    #[fuchsia::test(threads = 2)]
1574    async fn test_page_in_range_report_failure() {
1575        let file = PagerRangeTestFile::new(
1576            |range| {
1577                range.report_failure(zx::Status::IO_DATA_INTEGRITY);
1578            },
1579            real_mark_dirty,
1580        );
1581
1582        let mut data = vec![0; 20];
1583        let err = file.vmo.read(&mut data, 0).unwrap_err();
1584        assert_eq!(err, zx::Status::IO_DATA_INTEGRITY);
1585    }
1586
1587    #[cfg(debug_assertions)]
1588    #[fuchsia::test(threads = 2)]
1589    #[should_panic(expected = "PagerRange was dropped without sending a response")]
1590    async fn test_page_in_range_dropped() {
1591        let file = PagerRangeTestFile::new(|_| {}, real_mark_dirty);
1592
1593        let mut data = vec![0; 20];
1594        file.vmo.read(&mut data, 0).unwrap_err();
1595    }
1596
1597    #[cfg(not(debug_assertions))]
1598    #[fuchsia::test(threads = 2)]
1599    async fn test_page_in_range_dropped() {
1600        let file = PagerRangeTestFile::new(|_| {}, real_mark_dirty);
1601
1602        let mut data = vec![0; 20];
1603        let err = file.vmo.read(&mut data, 0).unwrap_err();
1604        assert_eq!(err, zx::Status::BAD_STATE);
1605    }
1606
1607    #[fuchsia::test(threads = 2)]
1608    async fn test_mark_dirty_range_dirty_pages() {
1609        let file = PagerRangeTestFile::new(real_supply_pages, real_mark_dirty);
1610
1611        let data = vec![5; 20];
1612        file.vmo.write(&data, 0).unwrap();
1613    }
1614
1615    #[fuchsia::test(threads = 2)]
1616    async fn test_mark_dirty_range_report_failure() {
1617        let file = PagerRangeTestFile::new(real_supply_pages, |range| {
1618            range.report_failure(zx::Status::IO_DATA_INTEGRITY);
1619        });
1620
1621        let data = vec![5; 20];
1622        let err = file.vmo.write(&data, 0).unwrap_err();
1623        assert_eq!(err, zx::Status::IO_DATA_INTEGRITY);
1624    }
1625
1626    #[cfg(debug_assertions)]
1627    #[fuchsia::test(threads = 2)]
1628    #[should_panic(expected = "PagerRange was dropped without sending a response")]
1629    async fn test_mark_dirty_range_dropped() {
1630        let file = PagerRangeTestFile::new(real_supply_pages, |_| {});
1631
1632        let data = vec![5; 20];
1633        file.vmo.write(&data, 0).unwrap_err();
1634    }
1635
1636    #[cfg(not(debug_assertions))]
1637    #[fuchsia::test(threads = 2)]
1638    async fn test_mark_dirty_range_dropped() {
1639        let file = PagerRangeTestFile::new(real_supply_pages, |_| {});
1640
1641        let data = vec![5; 20];
1642        let err = file.vmo.write(&data, 0).unwrap_err();
1643        assert_eq!(err, zx::Status::BAD_STATE);
1644    }
1645
1646    #[fuchsia::test(threads = 2)]
1647    async fn test_pager_range_chunks_iter_consumed() {
1648        let file = PagerRangeTestFile::new(
1649            |range| {
1650                let aux_vmo = zx::Vmo::create(page_size()).unwrap();
1651                range.expand(0..page_size() * 2).chunks(page_size()).for_each(|range| {
1652                    range.supply_pages(&aux_vmo, 0);
1653                });
1654            },
1655            real_mark_dirty,
1656        );
1657
1658        let mut data = vec![0; 20];
1659        file.vmo.read(&mut data, 0).unwrap();
1660    }
1661
1662    fn partial_supply_pages(range: PageInRange<PagerRangeTestFile>) {
1663        let aux_vmo = zx::Vmo::create(page_size()).unwrap();
1664        // Expand the range to 2 pages and only supply the first page, dropping the iterator without
1665        // fully consuming it.
1666        range.expand(0..page_size() * 2).chunks(page_size()).take(1).for_each(|range| {
1667            range.supply_pages(&aux_vmo, 0);
1668        });
1669    }
1670
1671    #[cfg(debug_assertions)]
1672    #[fuchsia::test(threads = 2)]
1673    #[should_panic(expected = "PagerRangeChunksIter was dropped without being fully consumed")]
1674    async fn test_pager_range_chunks_iter_dropped() {
1675        let file = PagerRangeTestFile::new(partial_supply_pages, real_mark_dirty);
1676
1677        let mut data = vec![0; 20];
1678        // Ask for the 2nd page. The range will be expanded to the first 2 pages. The first page
1679        // will succeed and the second page will be dropped.
1680        file.vmo.read(&mut data, page_size()).unwrap_err();
1681    }
1682
1683    #[cfg(not(debug_assertions))]
1684    #[fuchsia::test(threads = 2)]
1685    async fn test_pager_range_chunks_iter_dropped() {
1686        let file = PagerRangeTestFile::new(partial_supply_pages, real_mark_dirty);
1687
1688        let mut data = vec![0; 20];
1689        // Ask for the 2nd page. The range will be expanded to the first 2 pages. The first page
1690        // will succeed and the second page will be dropped.
1691        let err = file.vmo.read(&mut data, page_size()).unwrap_err();
1692        assert_eq!(err, zx::Status::BAD_STATE);
1693    }
1694
    #[fuchsia::test(threads = 2)]
    async fn test_grow_zeroes_new_bytes() {
        // We expect that when we grow a file, the pages between the old and the new size
        // are zeroed. Reads and writes to these pages after growing a file should NOT
        // trigger any page-in requests.
        let scope = ExecutionScope::new();
        let pager = Arc::new(Pager::new(scope.clone()).unwrap());
        let page_size = page_size();
        let vmo_size: u64 = page_size * 2;
        // file_a exercises plain `set_size` on a RESIZABLE VMO; file_b exercises
        // `set_stream_size` on an UNBOUNDED VMO. Both are expected to behave the same here.
        let file_a =
            MockFile::new_with_size_and_type(pager.clone(), vmo_size, zx::VmoOptions::RESIZABLE);
        let file_b =
            MockFile::new_with_size_and_type(pager.clone(), vmo_size, zx::VmoOptions::UNBOUNDED);
        let mut buffer = vec![VmoDirtyRange::default(); 3];

        assert_eq!(file_a.vmo().get_stream_size().unwrap(), page_size * 2);
        assert_eq!(file_b.vmo().get_stream_size().unwrap(), page_size * 2);

        // Page in is expected.
        let mut read_buf = vec![0u8; page_size as usize];
        file_a.vmo().read(&mut read_buf, page_size).expect("read a");
        assert_eq!(
            file_a.pager_requests(true),
            vec![PagerRequest::PageIn(page_size..page_size * 2)]
        );
        file_b.vmo().read(&mut read_buf, page_size).expect("read b");
        assert_eq!(
            file_b.pager_requests(true),
            vec![PagerRequest::PageIn(page_size..page_size * 2)]
        );

        // Grow the VMO size and confirm intermediate pages (2..8) are zero.
        let vmo_size = page_size * 8;
        file_a.vmo().set_size(vmo_size).unwrap();
        file_b.vmo().set_stream_size(vmo_size).unwrap();

        // NOTE(review): `options: 1` below appears to flag a zero range as opposed to a
        // modified one (cf. the "only zero pages" expectation in test_grow_unbounded_vmo) —
        // confirm against the VmoDirtyRange definition.
        assert_eq!(
            pager.query_dirty_ranges(file_a.vmo(), 0..vmo_size, &mut buffer).unwrap(),
            (1, 0)
        );
        assert_eq!(
            buffer[0],
            VmoDirtyRange { offset: page_size * 2, length: page_size * 6, options: 1 },
        );
        assert_eq!(
            pager.query_dirty_ranges(file_b.vmo(), 0..vmo_size, &mut buffer).unwrap(),
            (1, 0)
        );
        assert_eq!(
            buffer[0],
            VmoDirtyRange { offset: page_size * 2, length: page_size * 6, options: 1 },
        );

        // The extra pages are all zero. We shouldn't see any page_in requests.
        let mut read_buf = vec![0u8; page_size as usize * 6];
        file_a.vmo().read(&mut read_buf, page_size * 2).expect("read a");
        assert_eq!(file_a.pager_requests(true), vec![]);
        file_b.vmo().read(&mut read_buf, page_size * 2).expect("read b");
        assert_eq!(file_b.pager_requests(true), vec![]);

        // NOTE(review): `vmo_size` is recomputed here as `page_size * 8` — identical to the
        // size set above — so this is a no-op resize rather than a further grow, and the
        // assertions below accordingly see the unchanged 2..8 zero range and no pager
        // requests. If "grow again" was the intent, the size (and expected ranges) would need
        // to change — confirm.
        let vmo_size = page_size * 8;
        file_a.vmo().set_size(vmo_size).unwrap();
        file_b.vmo().set_stream_size(vmo_size).unwrap();
        assert_eq!(
            pager.query_dirty_ranges(file_a.vmo(), 0..vmo_size, &mut buffer).unwrap(),
            (1, 0)
        );
        assert_eq!(
            buffer[0],
            VmoDirtyRange { offset: page_size * 2, length: page_size * 6, options: 1 },
        );
        assert_eq!(
            pager.query_dirty_ranges(file_b.vmo(), 0..vmo_size, &mut buffer).unwrap(),
            (1, 0)
        );
        assert_eq!(
            buffer[0],
            VmoDirtyRange { offset: page_size * 2, length: page_size * 6, options: 1 },
        );
        // No pager requests. All new pages are assumed zero.
        assert_eq!(file_a.pager_requests(true), vec![],);
        assert_eq!(file_b.pager_requests(true), vec![],);

        // Modifying a page in this new region should trigger a dirty message to the pager.
        file_b.vmo().write(&[1; 10], page_size * 2).unwrap();
        assert_eq!(
            file_b.pager_requests(true),
            vec![PagerRequest::Dirty(page_size * 2..page_size * 3)],
        );

        // Shrink again to 4 pages and then append a page via zx_stream_write (WRITE)
        let vmo_size = page_size * 4;
        file_b.vmo().set_stream_size(vmo_size).unwrap();
        let stream =
            zx::Stream::create(zx::StreamOptions::MODE_WRITE, file_b.vmo(), page_size * 4).unwrap();
        stream.write(zx::StreamWriteOptions::empty(), &vec![10; page_size as usize]).unwrap();
        assert_eq!(
            file_b.pager_requests(true),
            vec![PagerRequest::Dirty(page_size * 4..page_size * 5)],
        );

        // Append a page via zx_stream_write (APPEND)
        let stream = zx::Stream::create(
            zx::StreamOptions::MODE_WRITE | zx::StreamOptions::MODE_APPEND,
            file_b.vmo(),
            page_size * 5,
        )
        .unwrap();
        stream.write(zx::StreamWriteOptions::empty(), &[10; 1024]).unwrap();
        assert_eq!(
            file_b.pager_requests(true),
            vec![PagerRequest::Dirty(page_size * 5..page_size * 6)],
        );

        // Let any in-flight pager tasks finish before the pager is torn down.
        scope.wait().await;
    }
1812
    #[fuchsia::test(threads = 2)]
    async fn test_pathological_shrink_unbounded_vmo() {
        let scope = ExecutionScope::new();
        let pager = Arc::new(Pager::new(scope.clone()).unwrap());
        let page_size = page_size();
        let vmo_size: u64 = page_size * 25600; // 100MiB
        let file =
            MockFile::new_with_size_and_type(pager.clone(), vmo_size, zx::VmoOptions::UNBOUNDED);
        let mut buffer = vec![VmoDirtyRange::default(); 10];

        assert_eq!(file.vmo().get_stream_size().unwrap(), vmo_size);

        // Dirty 20 bytes at every 256-byte offset so every page of the VMO becomes dirty.
        // Shrinking by a small step to check that last page truncation works as expected.
        for i in 0..vmo_size / 256 {
            let data = vec![5; 20];
            file.vmo.write(&data, i * 256).expect("write failed");
        }

        // NOTE(review): the sizes here are `i * 1024` BYTES plus half a page — not pages — so
        // the first iteration shrinks from 100MiB straight down to ~26KiB and subsequent
        // iterations step down 1KiB at a time, always landing on a non-page-aligned size.
        // Confirm that byte (rather than page) units are intended here.
        for i in (0..25600u64 / 1024).rev() {
            file.vmo().set_stream_size(i * 1024 + page_size / 2).unwrap();
        }

        // The final size is half a page, so only the (partial) first page should remain dirty.
        assert_eq!(pager.query_dirty_ranges(file.vmo(), 0..vmo_size, &mut buffer).unwrap(), (1, 0));
        assert_eq!(buffer[0..1], [VmoDirtyRange { offset: 0, length: page_size, options: 0 },]);

        // Let any in-flight pager tasks finish before the pager is torn down.
        scope.wait().await;
    }
1840
    #[fuchsia::test(threads = 2)]
    async fn test_pathological_shrink_unbounded_vmo_with_gaps() {
        let scope = ExecutionScope::new();
        let pager = Arc::new(Pager::new(scope.clone()).unwrap());
        let page_size = page_size();
        let vmo_size: u64 = page_size * 25600; // 100MiB
        let file =
            MockFile::new_with_size_and_type(pager.clone(), vmo_size, zx::VmoOptions::UNBOUNDED);
        let mut buffer = vec![VmoDirtyRange::default(); 10];

        assert_eq!(file.vmo().get_stream_size().unwrap(), vmo_size);

        // Write every second page.
        for offset in (0u64..vmo_size).step_by((page_size * 2) as usize) {
            let data = vec![5; 20];
            file.vmo.write(&data, offset).expect("write failed");
        }
        // Every second page should be dirty. `buffer` only holds 10 ranges, so `remaining`
        // accounts for the dirty ranges that did not fit in this query.
        let (actual, remaining) =
            pager.query_dirty_ranges(file.vmo(), 0..vmo_size, &mut buffer).unwrap();
        assert_eq!(actual + remaining, 25600 / 2);

        // Avoid page-aligned sizes to ensure we test the partial page code paths.
        let mut offset = vmo_size.saturating_sub(5 * page_size - 2);
        // Shrink by 5 pages, then 4 pages. This covers all possible arrangements of
        // start/end being on zero and non-zero pages.
        'outer: loop {
            for delta in [5 * page_size, 4 * page_size] {
                file.vmo().set_stream_size(offset).unwrap();
                assert_eq!(
                    pager.query_dirty_ranges(file.vmo(), offset..vmo_size, &mut buffer).unwrap(),
                    (1, 0)
                );
                // We do not expect to see dirty pages beyond stream size; only the partial page
                // containing the new size remains.
                assert_eq!(
                    buffer[0..1],
                    [VmoDirtyRange {
                        offset: round_down(offset, page_size),
                        length: page_size,
                        options: 0
                    },]
                );
                // `delta` determines the size used by the *next* iteration.
                offset = offset.saturating_sub(delta);
                if offset == 0 {
                    break 'outer;
                }
            }
        }

        // Let any in-flight pager tasks finish before the pager is torn down.
        scope.wait().await;
    }
1892
    #[fuchsia::test(threads = 2)]
    async fn test_grow_unbounded_vmo() {
        let scope = ExecutionScope::new();
        let pager = Arc::new(Pager::new(scope.clone()).unwrap());
        let file = MockFile::new_with_size_and_type(pager.clone(), 128, zx::VmoOptions::UNBOUNDED);

        let data = vec![1; 128];
        // Write 128 bytes starting at the current content size (offset 128), i.e. entirely
        // beyond the stream size.
        file.vmo().write(&data, 128).expect("write failed");
        // Grow the VMO to include the newly written bytes.
        file.vmo().set_stream_size(256).unwrap();
        assert_eq!(file.vmo().get_stream_size().expect("get_stream_size"), 256);

        // The grown region must read back as zero even though it was written before the grow:
        // bytes beyond the old content size are expected to be zeroed by the resize.
        // NOTE(review): this zeroing semantic is asserted here but not documented locally —
        // confirm against the pager/stream-size specification.
        let mut data = vec![0xff; 256];
        file.vmo().read(&mut data, 0).expect("read");
        let expected = vec![0; 256];
        assert_eq!(data, expected);

        file.vmo().set_stream_size(page_size() * 3).unwrap();
        let mut buffer = vec![VmoDirtyRange::default(); 10];
        assert_eq!(
            pager.query_dirty_ranges(file.vmo(), 0..page_size() * 3, &mut buffer).unwrap(),
            (2, 0)
        );
        // We expect to see only zero pages beyond content size.
        assert_eq!(
            buffer[0..2],
            [
                VmoDirtyRange { offset: 0, length: page_size(), options: 0 },
                VmoDirtyRange { offset: page_size(), length: page_size() * 2, options: 1 },
            ]
        );

        // Let any in-flight pager tasks finish before the pager is torn down.
        scope.wait().await;
    }
1928}