1use crate::fuchsia::errors::map_to_status;
6use crate::fuchsia::node::FxNode;
7use crate::fuchsia::profile::Recorder;
8use anyhow::Error;
9use bitflags::bitflags;
10use fuchsia_async::epoch::{Epoch, EpochGuard};
11use fuchsia_async::{self as fasync};
12use fuchsia_sync::{Mutex, MutexGuard};
13use fxfs::future_with_guard::FutureWithGuard;
14use fxfs::log::*;
15use fxfs::range::RangeExt;
16use fxfs::round::{round_down, round_up};
17use std::future::Future;
18use std::marker::PhantomData;
19use std::mem::MaybeUninit;
20use std::ops::{Deref, Range};
21use std::sync::atomic::{AtomicU64, Ordering};
22use std::sync::{Arc, Weak};
23use storage_device::buffer;
24use vfs::execution_scope::ExecutionScope;
25use zx::sys::zx_page_request_command_t::{ZX_PAGER_VMO_DIRTY, ZX_PAGER_VMO_READ};
26use zx::{PacketContents, PagerPacket, SignalPacket};
27
/// Count of files currently held via a strong reference by their `PagerPacketReceiver` (i.e.
/// files with an active zero-children watch). Incremented on the Weak -> Strong transition and
/// decremented on Strong -> Weak.
pub static STRONG_FILE_REFS: AtomicU64 = AtomicU64::new(0);
29
30fn watch_for_zero_children(file: &impl PagerBacked) -> Result<(), zx::Status> {
31 file.vmo().wait_async(
32 file.pager().executor.port(),
33 file.pager_packet_receiver_registration().key(),
34 zx::Signals::VMO_ZERO_CHILDREN,
35 zx::WaitAsyncOpts::empty(),
36 )
37}
38
/// Registration of a `PagerPacketReceiver` with the executor; port packets carrying the
/// registration's key are routed to the receiver.
pub type PagerPacketReceiverRegistration<T> = fasync::ReceiverRegistration<PagerPacketReceiver<T>>;
40
/// Receives pager packets (page-in / mark-dirty requests) and zero-children signal packets for a
/// single pager-backed file.
pub struct PagerPacketReceiver<T> {
    // Strong or weak reference to the file. Held strong while a zero-children watch is active so
    // the file outlives the watch; otherwise weak so the receiver doesn't keep the file alive.
    file: Mutex<FileHolder<T>>,
}
45
/// Holds the `PagerPacketReceiver`'s file lock, blocking strong/weak transitions while the caller
/// inspects the state observed by `set_receiver`.
pub struct PagerPacketReceiverLock<'a, T> {
    _guard: MutexGuard<'a, FileHolder<T>>,
    // Whether the holder contained a strong reference at the time the lock was taken.
    strong: bool,
}
51
impl<T> PagerPacketReceiverLock<'_, T> {
    /// Returns true if the receiver held a strong file reference when this lock was created.
    pub fn is_strong(&self) -> bool {
        self.strong
    }
}
58
impl<T: PagerBacked> PagerPacketReceiver<T> {
    /// Cancels a zero-children watch: if the receiver holds a strong reference, downgrades it to
    /// weak and invokes `on_zero_children` on the released strong reference.
    pub fn stop_watching_for_zero_children(&self) {
        let mut file = self.file.lock();
        if let FileHolder::Strong(strong) = &*file {
            let weak = FileHolder::Weak(Arc::downgrade(&strong));
            // Re-match after the replace to take ownership of the strong Arc we just swapped out.
            let FileHolder::Strong(strong) = std::mem::replace(&mut *file, weak) else {
                unreachable!();
            };
            STRONG_FILE_REFS.fetch_sub(1, Ordering::Relaxed);
            strong.on_zero_children();
        }
    }

    /// Points the receiver at `new_receiver`, preserving the current strong/weak mode. Returns a
    /// lock that holds the state stable and reports (via `is_strong`) which mode was in effect.
    pub fn set_receiver(&self, new_receiver: &Arc<T>) -> PagerPacketReceiverLock<'_, T> {
        let mut receiver_lock = self.file.lock();
        let strong = match &mut *receiver_lock {
            FileHolder::Strong(arc) => {
                *arc = new_receiver.clone();
                true
            }
            FileHolder::Weak(arc) => {
                *arc = Arc::downgrade(new_receiver);
                false
            }
        };
        PagerPacketReceiverLock { _guard: receiver_lock, strong }
    }

    /// Dispatches a kernel pager packet (read or dirty request) to the file's `page_in` /
    /// `mark_dirty` hooks.
    fn receive_pager_packet(&self, contents: PagerPacket) {
        let command = contents.command();
        // Only read and dirty requests are handled; anything else is silently ignored.
        if command != ZX_PAGER_VMO_READ && command != ZX_PAGER_VMO_DIRTY {
            return;
        }

        let (file, epoch_guard) = {
            let file_lock = self.file.lock();
            let file = match &*file_lock {
                FileHolder::Strong(file) => file.clone(),
                FileHolder::Weak(file) => {
                    if let Some(file) = file.upgrade() {
                        file
                    } else {
                        // The file was dropped before this packet was processed.
                        error!("Received a page request for a file that is closed {:?}", contents);
                        return;
                    }
                }
            };

            // For reads, take an epoch guard while still under the lock so that
            // `Pager::page_in_barrier` can wait for this request to complete.
            let epoch_guard = match command {
                ZX_PAGER_VMO_READ => Some(Epoch::global().guard()),
                _ => None,
            };
            (file, epoch_guard)
        };

        // If the execution scope is shutting down, fail the request rather than service it.
        let Some(_scope_guard) = file.pager().scope.try_active_guard() else {
            file.pager().report_failure(file.vmo(), contents.range(), zx::Status::BAD_STATE);
            return;
        };
        match command {
            ZX_PAGER_VMO_READ => {
                file.clone().page_in(PageInRange::new(contents.range(), file, epoch_guard.unwrap()))
            }
            ZX_PAGER_VMO_DIRTY => {
                file.clone().mark_dirty(MarkDirtyRange::new(contents.range(), file))
            }
            _ => unreachable!("Unhandled commands are filtered above"),
        }
    }

    /// Handles the `VMO_ZERO_CHILDREN` signal registered by `watch_for_zero_children`: if the VMO
    /// really has no children, downgrades the strong reference and calls `on_zero_children`;
    /// otherwise (a child appeared again) re-arms the watch.
    fn receive_signal_packet(&self, signals: SignalPacket) {
        assert!(signals.observed().contains(zx::Signals::VMO_ZERO_CHILDREN));

        let mut file = self.file.lock();
        // A Weak holder means the watch was already cancelled; nothing to do.
        if let FileHolder::Strong(strong) = &*file {
            let Some(_guard) = strong.pager().scope.try_active_guard() else {
                info!("Ignoring zero-children notification due to shutting down");
                return;
            };
            // Re-check the child count: a new child may have been created between the signal
            // being queued and this packet being processed.
            match strong.vmo().info() {
                Ok(info) => {
                    if info.num_children == 0 {
                        let weak = FileHolder::Weak(Arc::downgrade(&strong));
                        let FileHolder::Strong(strong) = std::mem::replace(&mut *file, weak) else {
                            unreachable!();
                        };
                        STRONG_FILE_REFS.fetch_sub(1, Ordering::Relaxed);
                        strong.on_zero_children();
                    } else {
                        // Children reappeared; keep the strong reference and wait again.
                        watch_for_zero_children(strong.as_ref()).unwrap();
                    }
                }
                Err(e) => error!(error:? = e; "Vmo::info failed"),
            }
        }
    }
}
184
185impl<T: PagerBacked> fasync::PacketReceiver for PagerPacketReceiver<T> {
186 fn receive_packet(&self, packet: zx::Packet) {
187 match packet.contents() {
188 PacketContents::Pager(contents) => {
189 self.receive_pager_packet(contents);
190 }
191 PacketContents::SignalOne(signals) => {
192 self.receive_signal_packet(signals);
193 }
194 _ => unreachable!(), }
196 }
197}
198
/// Services kernel pager requests for pager-backed VMOs, routing them to their `PagerBacked`
/// owners on the executor.
pub struct Pager {
    pager: zx::Pager,
    // Tasks spawned to service requests hold active guards on this scope.
    scope: ExecutionScope,
    executor: fasync::EHandle,
    // Optional profile recorder used to log page-in activity; dropped on first record error.
    recorder: Mutex<Option<Box<dyn Recorder>>>,
}
205
206enum FileHolder<T> {
211 Strong(Arc<T>),
212 Weak(Weak<T>),
213}
214
215impl Pager {
217 pub fn new(scope: ExecutionScope) -> Result<Self, Error> {
219 Ok(Pager {
220 pager: zx::Pager::create(zx::PagerOptions::empty())?,
221 scope,
222 executor: fasync::EHandle::local(),
223 recorder: Mutex::new(None),
224 })
225 }
226
227 fn spawn(&self, task: impl Future<Output = ()> + Send + 'static) {
229 if let Some(guard) = self.scope.try_active_guard() {
230 self.executor.spawn_detached(FutureWithGuard::new(guard, task));
231 }
232 }
233
234 pub fn set_recorder(&self, recorder: Option<Box<dyn Recorder>>) {
236 let _old = std::mem::replace(&mut (*self.recorder.lock()), recorder);
238 }
239
240 pub fn recorder(&self) -> MutexGuard<'_, Option<Box<dyn Recorder>>> {
242 self.recorder.lock()
243 }
244
245 pub fn record_page_in<P: PagerBacked>(&self, node: Arc<P>, range: Range<u64>) {
247 let mut recorder_holder = self.recorder.lock();
248 if let Some(recorder) = &mut (*recorder_holder) {
249 if let Err(_) = recorder.record(node, range.start) {
251 *recorder_holder = None;
252 }
253 }
254 }
255
256 pub fn create_vmo<T: PagerBacked>(
258 &self,
259 file: Weak<T>,
260 initial_size: u64,
261 vmo_options: zx::VmoOptions,
262 ) -> Result<(zx::Vmo, PagerPacketReceiverRegistration<T>), Error> {
263 let registration = self
264 .executor
265 .register_receiver(PagerPacketReceiver { file: Mutex::new(FileHolder::Weak(file)) });
266 Ok((
267 self.pager.create_vmo(
268 vmo_options,
269 self.executor.port(),
270 registration.key(),
271 initial_size,
272 )?,
273 registration,
274 ))
275 }
276
277 pub fn watch_for_zero_children(&self, file: &impl PagerBacked) -> Result<bool, Error> {
281 let mut file = file.pager_packet_receiver_registration().file.lock();
282
283 match &*file {
284 FileHolder::Weak(weak) => {
285 let strong = weak.upgrade().unwrap();
287
288 watch_for_zero_children(strong.as_ref())?;
289
290 STRONG_FILE_REFS.fetch_add(1, Ordering::Relaxed);
291 *file = FileHolder::Strong(strong);
292 Ok(true)
293 }
294 FileHolder::Strong(_) => Ok(false),
295 }
296 }
297
298 fn supply_pages(
301 &self,
302 vmo: &zx::Vmo,
303 range: Range<u64>,
304 transfer_vmo: &zx::Vmo,
305 transfer_offset: u64,
306 ) {
307 if let Err(e) = self.pager.supply_pages(vmo, range, transfer_vmo, transfer_offset) {
308 error!(error:? = e; "supply_pages failed");
309 }
310 }
311
312 fn report_failure(&self, vmo: &zx::Vmo, range: Range<u64>, status: zx::Status) {
316 let pager_status = match status {
317 zx::Status::IO_DATA_INTEGRITY => zx::Status::IO_DATA_INTEGRITY,
318 zx::Status::NO_SPACE => zx::Status::NO_SPACE,
319 zx::Status::FILE_BIG => zx::Status::BUFFER_TOO_SMALL,
320 zx::Status::IO
321 | zx::Status::IO_DATA_LOSS
322 | zx::Status::IO_INVALID
323 | zx::Status::IO_MISSED_DEADLINE
324 | zx::Status::IO_NOT_PRESENT
325 | zx::Status::IO_OVERRUN
326 | zx::Status::IO_REFUSED
327 | zx::Status::PEER_CLOSED => zx::Status::IO,
328 _ => zx::Status::BAD_STATE,
329 };
330 if let Err(e) = self.pager.op_range(zx::PagerOp::Fail(pager_status), vmo, range) {
331 error!(error:? = e; "op_range failed");
332 }
333 }
334
335 fn dirty_pages(&self, vmo: &zx::Vmo, range: Range<u64>) -> Result<(), zx::Status> {
338 let result = self.pager.op_range(zx::PagerOp::Dirty, vmo, range);
339 if let Err(e) = &result {
340 error!(error:? = e; "dirty_pages failed");
344 }
345 return result;
346 }
347
348 pub fn writeback_begin(
351 &self,
352 vmo: &zx::Vmo,
353 range: Range<u64>,
354 options: zx::PagerWritebackBeginOptions,
355 ) {
356 if let Err(e) = self.pager.op_range(zx::PagerOp::WritebackBegin(options), vmo, range) {
357 error!(error:? = e; "writeback_begin failed");
358 }
359 }
360
361 pub fn writeback_end(&self, vmo: &zx::Vmo, range: Range<u64>) {
364 if let Err(e) = self.pager.op_range(zx::PagerOp::WritebackEnd, vmo, range) {
365 error!(error:? = e; "writeback_end failed");
366 }
367 }
368
369 pub fn query_dirty_ranges(
374 &self,
375 vmo: &zx::Vmo,
376 range: Range<u64>,
377 buffer: &mut [VmoDirtyRange],
378 ) -> Result<(usize, usize), zx::Status> {
379 let mut actual = 0;
380 let mut avail = 0;
381 let status = unsafe {
382 zx::sys::zx_pager_query_dirty_ranges(
385 self.pager.raw_handle(),
386 vmo.raw_handle(),
387 range.start,
388 range.end - range.start,
389 buffer.as_mut_ptr() as *mut u8,
390 std::mem::size_of_val(buffer),
391 &mut actual as *mut usize,
392 &mut avail as *mut usize,
393 )
394 };
395 zx::ok(status).map(|_| (actual, avail - actual))
396 }
397
398 pub fn query_vmo_stats(
402 &self,
403 vmo: &zx::Vmo,
404 options: PagerVmoStatsOptions,
405 ) -> Result<PagerVmoStats, zx::Status> {
406 #[repr(C)]
407 #[derive(Default)]
408 struct zx_pager_vmo_stats {
409 pub modified: u32,
410 }
411 const ZX_PAGER_VMO_STATS_MODIFIED: u32 = 1;
412 let mut vmo_stats = MaybeUninit::<zx_pager_vmo_stats>::uninit();
413 let status = unsafe {
414 zx::sys::zx_pager_query_vmo_stats(
417 self.pager.raw_handle(),
418 vmo.raw_handle(),
419 options.bits(),
420 vmo_stats.as_mut_ptr() as *mut u8,
421 std::mem::size_of::<zx_pager_vmo_stats>(),
422 )
423 };
424 zx::ok(status)?;
425 let vmo_stats = unsafe { vmo_stats.assume_init() };
426 Ok(PagerVmoStats { was_vmo_modified: vmo_stats.modified == ZX_PAGER_VMO_STATS_MODIFIED })
427 }
428
429 pub async fn page_in_barrier() {
430 Epoch::global().barrier().await;
431 }
432}
433
/// A node whose VMO contents are supplied on demand by the `Pager`.
pub trait PagerBacked: FxNode + Sync + Send + Sized + 'static {
    /// The pager servicing this file's VMO.
    fn pager(&self) -> &Pager;

    /// The receiver registration created when this file's VMO was made via `Pager::create_vmo`.
    fn pager_packet_receiver_registration(&self) -> &PagerPacketReceiverRegistration<Self>;

    /// The pager-backed VMO for this file.
    fn vmo(&self) -> &zx::Vmo;

    /// Called on a `ZX_PAGER_VMO_READ` request; must resolve `range` (supply pages or report
    /// failure) — dropping it unresolved panics in debug builds.
    fn page_in(self: Arc<Self>, range: PageInRange<Self>);

    /// Called on a `ZX_PAGER_VMO_DIRTY` request; must resolve `range` (dirty the pages or report
    /// failure) — dropping it unresolved panics in debug builds.
    fn mark_dirty(self: Arc<Self>, range: MarkDirtyRange<Self>);

    /// Called when the VMO has no remaining children while a zero-children watch is active.
    fn on_zero_children(self: Arc<Self>);

    /// The file's content size in bytes (used to decide which pages read as zeros).
    fn byte_size(&self) -> u64;

    /// Reads the given page-aligned byte range from storage into a buffer.
    fn aligned_read(
        &self,
        aligned_byte_range: std::ops::Range<u64>,
    ) -> impl Future<Output = Result<buffer::Buffer<'_>, Error>> + Send;
}
473
/// Default `page_in` implementation: pages past the file's (page-aligned) size are supplied as
/// zeros; the rest is expanded for readahead, split into `read_ahead_size` chunks, and read
/// asynchronously via `page_in_chunk`.
pub fn default_page_in<P: PagerBacked>(
    this: Arc<P>,
    pager_range: PageInRange<P>,
    read_ahead_size: u64,
) {
    fxfs_trace::duration!(
        "start-page-in",
        "offset" => pager_range.start(),
        "len" => pager_range.len()
    );

    // Shared zero-filled VMO used to satisfy page-ins beyond the end of the file.
    const ZERO_VMO_SIZE: u64 = 1_048_576;
    static ZERO_VMO: std::sync::LazyLock<zx::Vmo> =
        std::sync::LazyLock::new(|| zx::Vmo::create(ZERO_VMO_SIZE).unwrap());

    assert!(pager_range.end() < i64::MAX as u64);

    let page_aligned_size = round_up(this.byte_size(), page_size()).unwrap();

    // Everything at or beyond the aligned file size reads as zeros; supply those immediately.
    let (read_range, zero_range) = pager_range.split(page_aligned_size);
    if let Some(zero_range) = zero_range {
        // Chunked because the zero VMO is only ZERO_VMO_SIZE bytes long.
        for range in zero_range.chunks(ZERO_VMO_SIZE) {
            range.supply_pages(&ZERO_VMO, 0);
        }
    }

    if let Some(read_range) = read_range {
        // Expand to readahead-aligned boundaries, but never past the end of the file.
        let expanded_range_for_readahead = round_down(read_range.start(), read_ahead_size)
            ..std::cmp::min(
                round_up(read_range.end(), read_ahead_size).unwrap(),
                page_aligned_size,
            );
        let read_range = read_range.expand(expanded_range_for_readahead);
        for range in read_range.chunks(read_ahead_size) {
            this.pager().record_page_in(this.clone(), range.range.clone());

            this.pager().spawn(page_in_chunk(this.clone(), range));
        }
    }
}
540
/// Reads one chunk from storage and supplies the pages to the kernel, reporting failure to the
/// pager if the read errors out.
#[fxfs_trace::trace("offset" => read_range.start(), "len" => read_range.len())]
async fn page_in_chunk<P: PagerBacked>(this: Arc<P>, read_range: PageInRange<P>) {
    let buffer = match this.aligned_read(read_range.range()).await {
        Ok(v) => v,
        Err(error) => {
            error!(range:? = read_range.range(), error:?; "Failed to load range");
            read_range.report_failure(map_to_status(error));
            return;
        }
    };
    // supply_pages below consumes the whole range, so the read must have covered it all.
    assert!(
        buffer.len() as u64 >= read_range.len(),
        "A buffer smaller than requested was returned. requested: {}, returned: {}",
        read_range.len(),
        buffer.len()
    );
    read_range.supply_pages(buffer.allocator().buffer_source().vmo(), buffer.range().start as u64);
}
559
/// One dirty-range entry returned by `Pager::query_dirty_ranges`. `#[repr(C)]` because the
/// kernel writes these directly via `zx_pager_query_dirty_ranges`.
#[repr(C)]
#[derive(Debug, Copy, Clone, Default, PartialEq, Eq)]
pub struct VmoDirtyRange {
    offset: u64,
    length: u64,
    // Flag bits; see `is_zero_range`.
    options: u64,
}
568
569impl VmoDirtyRange {
570 pub fn range(&self) -> Range<u64> {
572 self.offset..(self.offset + self.length)
573 }
574
575 pub fn is_zero_range(&self) -> bool {
577 self.options & zx::sys::ZX_VMO_DIRTY_RANGE_IS_ZERO != 0
578 }
579}
580
bitflags! {
    /// Options for `Pager::query_vmo_stats`, matching the kernel's option bits.
    #[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
    #[repr(transparent)]
    pub struct PagerVmoStatsOptions: u32 {
        /// Clears the modified flag as part of the query.
        const RESET_VMO_STATS = 1;
    }
}
590
/// Result of `Pager::query_vmo_stats`.
#[derive(Debug)]
pub struct PagerVmoStats {
    was_vmo_modified: bool,
}
596
impl PagerVmoStats {
    /// Whether the VMO was modified since the stats were last reset.
    pub fn was_vmo_modified(&self) -> bool {
        self.was_vmo_modified
    }
}
603
604#[inline]
605fn page_size() -> u64 {
606 zx::system_get_page_size().into()
607}
608
/// Marker trait naming the kind of pager request a `PagerRange` represents (used in drop-time
/// diagnostics).
pub trait PagerRequestType {
    /// Human-readable name of the request type.
    fn request_type_name() -> &'static str;
}
614
/// Marker type for page-in (read) requests.
pub struct PageInRequest;

impl PagerRequestType for PageInRequest {
    fn request_type_name() -> &'static str {
        "PageInRequest"
    }
}
623
/// A pager range that must be resolved by supplying pages or reporting failure.
pub type PageInRange<T> = PagerRange<T, PageInRequest>;
627
impl<T: PagerBacked> PageInRange<T> {
    /// Wraps a page-aligned read request. `epoch_guard` keeps the request visible to
    /// `Pager::page_in_barrier` until the range is resolved or dropped.
    pub fn new(range: Range<u64>, file: Arc<T>, epoch_guard: EpochGuard<'static>) -> Self {
        debug_assert!(
            range.start % page_size() == 0 && range.end % page_size() == 0,
            "{:?} is not page aligned",
            range
        );
        Self {
            range,
            inner: Some(PagerRangeInner { file, _epoch_guard: Some(epoch_guard) }),
            _request_type: PhantomData,
        }
    }

    /// Resolves this range by supplying pages from `transfer_vmo` starting at `transfer_offset`.
    /// Taking `inner` marks the range as resolved so `Drop` does not report it as leaked.
    pub fn supply_pages(mut self, transfer_vmo: &zx::Vmo, transfer_offset: u64) {
        let inner = self.inner.take().unwrap();
        inner.file.pager().supply_pages(
            inner.file.vmo(),
            self.range.clone(),
            transfer_vmo,
            transfer_offset,
        );
    }
}
655
/// Marker type for mark-dirty requests.
#[derive(Debug)]
pub struct MarkDirtyRequest;

impl PagerRequestType for MarkDirtyRequest {
    fn request_type_name() -> &'static str {
        "MarkDirtyRequest"
    }
}
665
/// A pager range that must be resolved by dirtying pages or reporting failure.
pub type MarkDirtyRange<T> = PagerRange<T, MarkDirtyRequest>;
669
impl<T: PagerBacked> MarkDirtyRange<T> {
    /// Wraps a page-aligned dirty request. No epoch guard: barriers only track reads.
    pub fn new(range: Range<u64>, file: Arc<T>) -> Self {
        debug_assert!(
            range.start % page_size() == 0 && range.end % page_size() == 0,
            "{:?} is not page aligned",
            range
        );
        Self {
            range,
            inner: Some(PagerRangeInner { file, _epoch_guard: None }),
            _request_type: PhantomData,
        }
    }

    /// Resolves this range by marking the pages dirty. Taking `inner` marks the range as
    /// resolved so `Drop` does not report it as leaked.
    pub fn dirty_pages(mut self) -> Result<(), zx::Status> {
        let inner = self.inner.take().unwrap();
        inner.file.pager().dirty_pages(inner.file.vmo(), self.range.clone())
    }
}
692
/// Shared payload of a `PagerRange`: the file the request targets plus, for reads, the epoch
/// guard that `Pager::page_in_barrier` waits on.
#[derive(Clone)]
struct PagerRangeInner<T: std::clone::Clone + Deref<Target: PagerBacked>> {
    file: T,
    // Present only for page-in requests; dropped when the request is resolved.
    _epoch_guard: Option<EpochGuard<'static>>,
}
703
/// A pager request for a byte range that MUST be resolved (pages supplied/dirtied or failure
/// reported). `inner` is `Some` while unresolved; `Drop` flags ranges dropped still unresolved.
pub struct PagerRange<T: PagerBacked, U: PagerRequestType> {
    range: Range<u64>,
    // Taken (set to None) when the range is resolved or its obligation moves elsewhere.
    inner: Option<PagerRangeInner<Arc<T>>>,
    // Distinguishes page-in from mark-dirty at the type level.
    _request_type: PhantomData<U>,
}
714
impl<T: PagerBacked, U: PagerRequestType> PagerRange<T, U> {
    /// Splits at `split_point` into the part below and the part at/above it (either may be
    /// `None`). The resolution obligation moves into the returned parts; `self`'s inner is taken
    /// so its `Drop` stays quiet.
    pub fn split(mut self, split_point: u64) -> (Option<Self>, Option<Self>) {
        let inner = self.inner.take().unwrap();
        let (left, right) = self.range.clone().split(split_point);
        let right = right.map(|range| Self {
            range,
            inner: Some(inner.clone()),
            _request_type: PhantomData,
        });
        let left = left.map(|range| Self { range, inner: Some(inner), _request_type: PhantomData });
        (left, right)
    }

    /// Grows this range to `new_range`, which must be a page-aligned superset, e.g. for
    /// readahead.
    pub fn expand(mut self, new_range: Range<u64>) -> Self {
        assert!(
            self.range.start >= new_range.start && self.range.end <= new_range.end,
            "{:?} is not a subset of {:?}",
            self.range,
            new_range
        );
        debug_assert!(
            new_range.start % page_size() == 0 && new_range.end % page_size() == 0,
            "{:?} is not page aligned",
            new_range
        );
        self.range = new_range;
        self
    }

    /// Converts this range into an iterator of `chunk_size`-sized sub-ranges (the last chunk may
    /// be short). The resolution obligation transfers to the iterator.
    pub fn chunks(mut self, chunk_size: u64) -> PagerRangeChunksIter<T, U> {
        debug_assert!(
            chunk_size % page_size() == 0,
            "{} is not a multiple of the page size",
            chunk_size
        );
        PagerRangeChunksIter {
            start: self.range.start,
            end: self.range.end,
            chunk_size: chunk_size,
            inner: self.inner.take(),
            _request_type: PhantomData,
        }
    }

    #[inline]
    pub fn start(&self) -> u64 {
        self.range.start
    }

    #[inline]
    pub fn end(&self) -> u64 {
        self.range.end
    }

    #[inline]
    pub fn len(&self) -> u64 {
        self.range.end - self.range.start
    }

    #[inline]
    pub fn range(&self) -> Range<u64> {
        self.range.clone()
    }

    /// Resolves this range by failing the pager request with `status`.
    pub fn report_failure(mut self, status: zx::Status) {
        let inner = self.inner.take().unwrap();
        inner.file.pager().report_failure(inner.file.vmo(), self.range.clone(), status);
    }

    /// Test-only: discharges the resolution obligation without touching the pager.
    #[cfg(test)]
    fn consume(mut self) {
        self.inner.take().unwrap();
    }
}
801
impl<T: PagerBacked, U: PagerRequestType> Drop for PagerRange<T, U> {
    /// A `PagerRange` dropped with `inner` still present was never resolved: debug builds panic
    /// (unless already panicking), release builds log and fail the request so the kernel isn't
    /// left waiting forever.
    fn drop(&mut self) {
        if let Some(inner) = &self.inner {
            let request_type = U::request_type_name();
            let range = self.range.clone();
            let key = inner.file.pager_packet_receiver_registration().key();
            if cfg!(debug_assertions) {
                // Don't double-panic while unwinding.
                if !std::thread::panicking() {
                    panic!(
                        "PagerRange was dropped without sending a response, \
                        request_type={request_type}, range={range:?}, key={key}",
                    );
                }
            } else {
                error!(
                    "PagerRange was dropped without sending a response, \
                    request_type={request_type}, range={range:?}, key={key}",
                );
                inner.file.pager().report_failure(inner.file.vmo(), range, zx::Status::BAD_STATE);
            }
        }
    }
}
828
/// Iterator produced by `PagerRange::chunks`. Carries the resolution obligation for the
/// not-yet-yielded portion `start..end`.
pub struct PagerRangeChunksIter<T: PagerBacked, U: PagerRequestType> {
    start: u64,
    end: u64,
    chunk_size: u64,
    // Some while chunks remain; moved into the final chunk when it is yielded.
    inner: Option<PagerRangeInner<Arc<T>>>,
    _request_type: PhantomData<U>,
}
839
impl<T: PagerBacked, U: PagerRequestType> Iterator for PagerRangeChunksIter<T, U> {
    type Item = PagerRange<T, U>;
    fn next(&mut self) -> Option<Self::Item> {
        if self.start == self.end {
            None
        } else if self.start + self.chunk_size >= self.end {
            // Final (possibly short) chunk: move `inner` out so the iterator's Drop goes quiet
            // and the last yielded range carries the remaining obligation (and epoch guard).
            let next = Self::Item {
                range: self.start..self.end,
                inner: self.inner.take(),
                _request_type: PhantomData,
            };
            self.start = self.end;
            Some(next)
        } else {
            // Intermediate chunk: clone `inner` so both the chunk and the iterator keep the
            // obligation for their respective portions.
            let next_end = self.start + self.chunk_size;
            let next = Self::Item {
                range: self.start..next_end,
                inner: self.inner.clone(),
                _request_type: PhantomData,
            };
            self.start = next_end;
            Some(next)
        }
    }
}
865
impl<T: PagerBacked, U: PagerRequestType> Drop for PagerRangeChunksIter<T, U> {
    /// Dropping the iterator before consuming all chunks leaves `start..end` unresolved: debug
    /// builds panic (unless already panicking), release builds log and fail the remainder.
    fn drop(&mut self) {
        if self.start != self.end {
            let request_type = U::request_type_name();
            let remaining = self.start..self.end;
            let inner = self.inner.take().unwrap();
            let key = inner.file.pager_packet_receiver_registration().key();
            if cfg!(debug_assertions) {
                // Don't double-panic while unwinding.
                if !std::thread::panicking() {
                    panic!(
                        "PagerRangeChunksIter was dropped without being fully consumed, \
                        request_type={request_type}, remaining={remaining:?}, key={key}",
                    );
                }
            } else {
                error!(
                    "PagerRangeChunksIter was dropped without being fully consumed, \
                    request_type={request_type}, remaining={remaining:?}, key={key}",
                );
                inner.file.pager().report_failure(
                    inner.file.vmo(),
                    remaining,
                    zx::Status::BAD_STATE,
                );
            }
        }
    }
}
897
898#[cfg(test)]
899mod tests {
900 use super::*;
901 use futures::StreamExt;
902 use futures::channel::mpsc;
903 use fxfs_macros::ToWeakNode;
904
    // Record of a pager request received by a mock file, for assertions in tests.
    #[derive(Clone, Debug, PartialEq, Eq)]
    enum PagerRequest {
        PageIn(Range<u64>),
        Dirty(Range<u64>),
    }
910
    /// Pager-backed test file that supplies zeroed pages and logs every pager request.
    #[derive(ToWeakNode)]
    struct MockFile {
        vmo: zx::Vmo,
        pager_packet_receiver_registration: PagerPacketReceiverRegistration<Self>,
        pager: Arc<Pager>,
        // Requests received, in arrival order; see `pager_requests`.
        pager_requests: Mutex<Vec<PagerRequest>>,
    }

    impl MockFile {
        fn new(pager: Arc<Pager>) -> Arc<Self> {
            Self::new_with_size_and_type(pager, page_size(), zx::VmoOptions::UNBOUNDED)
        }

        fn new_with_size_and_type(
            pager: Arc<Pager>,
            size: u64,
            vmo_type: zx::VmoOptions,
        ) -> Arc<Self> {
            Arc::new_cyclic(|weak| {
                // TRAP_DIRTY so writes generate ZX_PAGER_VMO_DIRTY requests we can observe.
                let (vmo, pager_packet_receiver_registration) = pager
                    .create_vmo(weak.clone(), size, vmo_type | zx::VmoOptions::TRAP_DIRTY)
                    .unwrap();
                Self {
                    pager,
                    vmo,
                    pager_packet_receiver_registration,
                    pager_requests: Default::default(),
                }
            })
        }

        // Returns the recorded requests, clearing the log when `reset` is true.
        fn pager_requests(&self, reset: bool) -> Vec<PagerRequest> {
            if reset {
                std::mem::take(&mut *self.pager_requests.lock())
            } else {
                self.pager_requests.lock().clone()
            }
        }
    }
952
    // FxNode is only required because PagerBacked demands it; none of these methods are exercised
    // by the pager tests.
    impl FxNode for MockFile {
        fn object_id(&self) -> u64 {
            unimplemented!();
        }

        fn parent(&self) -> Option<Arc<crate::directory::FxDirectory>> {
            unimplemented!();
        }

        fn set_parent(&self, _parent: Arc<crate::directory::FxDirectory>) {
            unimplemented!();
        }

        fn open_count_add_one(&self) {
            unimplemented!();
        }

        fn open_count_sub_one(self: Arc<Self>) {
            unimplemented!();
        }

        fn object_descriptor(&self) -> fxfs::object_store::ObjectDescriptor {
            unimplemented!();
        }
    }
978
    impl PagerBacked for MockFile {
        fn pager(&self) -> &Pager {
            &self.pager
        }

        fn pager_packet_receiver_registration(&self) -> &PagerPacketReceiverRegistration<Self> {
            &self.pager_packet_receiver_registration
        }

        fn vmo(&self) -> &zx::Vmo {
            &self.vmo
        }

        // Satisfies reads with freshly-created (zeroed) pages and logs the request.
        fn page_in(self: Arc<Self>, range: PageInRange<Self>) {
            let aux_vmo = zx::Vmo::create(range.len()).unwrap();
            self.pager_requests.lock().push(PagerRequest::PageIn(range.range()));
            range.supply_pages(&aux_vmo, 0);
        }

        // Approves all dirty requests and logs them.
        fn mark_dirty(self: Arc<Self>, range: MarkDirtyRange<Self>) {
            self.pager_requests.lock().push(PagerRequest::Dirty(range.range()));
            let _ = range.dirty_pages();
        }

        fn on_zero_children(self: Arc<Self>) {}

        fn byte_size(&self) -> u64 {
            unimplemented!();
        }
        async fn aligned_read(
            &self,
            _aligned_byte_range: std::ops::Range<u64>,
        ) -> Result<buffer::Buffer<'_>, Error> {
            unimplemented!();
        }
    }
1015
    /// Pager-backed test file that reports `on_zero_children` calls over a channel.
    #[derive(ToWeakNode)]
    struct OnZeroChildrenFile {
        pager: Arc<Pager>,
        vmo: zx::Vmo,
        pager_packet_receiver_registration: PagerPacketReceiverRegistration<Self>,
        // Signalled each time on_zero_children fires.
        sender: Mutex<mpsc::UnboundedSender<()>>,
    }

    impl OnZeroChildrenFile {
        fn new(pager: Arc<Pager>, sender: mpsc::UnboundedSender<()>) -> Arc<Self> {
            Arc::new_cyclic(|weak| {
                let (vmo, pager_packet_receiver_registration) =
                    pager.create_vmo(weak.clone(), page_size(), zx::VmoOptions::empty()).unwrap();
                Self { pager, vmo, pager_packet_receiver_registration, sender: Mutex::new(sender) }
            })
        }
    }
1033
    // FxNode is only required because PagerBacked demands it; none of these methods are exercised
    // by the pager tests.
    impl FxNode for OnZeroChildrenFile {
        fn object_id(&self) -> u64 {
            unimplemented!();
        }

        fn parent(&self) -> Option<Arc<crate::directory::FxDirectory>> {
            unimplemented!();
        }

        fn set_parent(&self, _parent: Arc<crate::directory::FxDirectory>) {
            unimplemented!();
        }

        fn open_count_add_one(&self) {
            unimplemented!();
        }

        fn open_count_sub_one(self: Arc<Self>) {
            unimplemented!();
        }

        fn object_descriptor(&self) -> fxfs::object_store::ObjectDescriptor {
            unimplemented!();
        }
    }
1059
    impl PagerBacked for OnZeroChildrenFile {
        fn pager(&self) -> &Pager {
            &self.pager
        }

        fn pager_packet_receiver_registration(&self) -> &PagerPacketReceiverRegistration<Self> {
            &self.pager_packet_receiver_registration
        }

        fn vmo(&self) -> &zx::Vmo {
            &self.vmo
        }

        // These tests never touch pages, so page requests must not occur.
        fn page_in(self: Arc<Self>, _range: PageInRange<Self>) {
            unreachable!();
        }

        fn mark_dirty(self: Arc<Self>, _range: MarkDirtyRange<Self>) {
            unreachable!();
        }

        // Forward the notification to the test via the channel.
        fn on_zero_children(self: Arc<Self>) {
            self.sender.lock().unbounded_send(()).unwrap();
        }
        fn byte_size(&self) -> u64 {
            unreachable!();
        }
        async fn aligned_read(
            &self,
            _aligned_byte_range: std::ops::Range<u64>,
        ) -> Result<buffer::Buffer<'_>, Error> {
            unreachable!();
        }
    }
1094
    /// Verifies that on_zero_children fires after the only child VMO is dropped.
    #[fuchsia::test(threads = 2)]
    async fn test_watch_for_zero_children() {
        let (sender, mut receiver) = mpsc::unbounded();
        let scope = ExecutionScope::new();
        let pager = Arc::new(Pager::new(scope.clone()).unwrap());
        let file = OnZeroChildrenFile::new(pager.clone(), sender);
        {
            let _child_vmo = file
                .vmo()
                .create_child(
                    zx::VmoChildOptions::SNAPSHOT_AT_LEAST_ON_WRITE,
                    0,
                    file.vmo().get_content_size().unwrap(),
                )
                .unwrap();
            assert!(pager.watch_for_zero_children(file.as_ref()).unwrap());
        }
        // Blocks until the sender is used, i.e. until on_zero_children is called.
        receiver.next().await.unwrap();

        scope.wait().await;
    }
1117
    /// Verifies that a second watch while one is active is a no-op (returns false), and that a
    /// watch can be re-registered and explicitly cancelled after it fires.
    #[fuchsia::test(threads = 2)]
    async fn test_multiple_watch_for_zero_children_calls() {
        let (sender, mut receiver) = mpsc::unbounded();
        let scope = ExecutionScope::new();
        let pager = Arc::new(Pager::new(scope.clone()).unwrap());
        let file = OnZeroChildrenFile::new(pager.clone(), sender);
        {
            let _child_vmo = file
                .vmo()
                .create_child(
                    zx::VmoChildOptions::SNAPSHOT_AT_LEAST_ON_WRITE,
                    0,
                    file.vmo().get_content_size().unwrap(),
                )
                .unwrap();
            assert!(pager.watch_for_zero_children(file.as_ref()).unwrap());
            // Already watching: should report false rather than registering twice.
            assert!(!pager.watch_for_zero_children(file.as_ref()).unwrap());
        }
        receiver.next().await.unwrap();

        // The previous watch has fired, so a new one can be registered...
        assert!(pager.watch_for_zero_children(file.as_ref()).unwrap());

        // ...and cancelled without a signal ever arriving.
        file.pager_packet_receiver_registration.stop_watching_for_zero_children();

        scope.wait().await;
    }
1148
    /// Verifies `Pager::report_failure`'s translation of internal error codes into the codes the
    /// kernel surfaces to VMO readers.
    #[fuchsia::test(threads = 2)]
    async fn test_status_code_mapping() {
        // Pager-backed file that fails every page-in with a configurable status code.
        #[derive(ToWeakNode)]
        struct StatusCodeFile {
            vmo: zx::Vmo,
            pager: Arc<Pager>,
            status_code: Mutex<zx::Status>,
            pager_packet_receiver_registration: PagerPacketReceiverRegistration<Self>,
        }

        // Required by PagerBacked but unused in this test.
        impl FxNode for StatusCodeFile {
            fn object_id(&self) -> u64 {
                unimplemented!();
            }

            fn parent(&self) -> Option<Arc<crate::directory::FxDirectory>> {
                unimplemented!();
            }

            fn set_parent(&self, _parent: Arc<crate::directory::FxDirectory>) {
                unimplemented!();
            }

            fn open_count_add_one(&self) {
                unimplemented!();
            }

            fn open_count_sub_one(self: Arc<Self>) {
                unimplemented!();
            }

            fn object_descriptor(&self) -> fxfs::object_store::ObjectDescriptor {
                unimplemented!();
            }
        }

        impl PagerBacked for StatusCodeFile {
            fn pager(&self) -> &Pager {
                &self.pager
            }

            fn pager_packet_receiver_registration(&self) -> &PagerPacketReceiverRegistration<Self> {
                &self.pager_packet_receiver_registration
            }

            fn vmo(&self) -> &zx::Vmo {
                &self.vmo
            }

            // Always fail with the currently-configured status.
            fn page_in(self: Arc<Self>, range: PageInRange<Self>) {
                range.report_failure(*self.status_code.lock());
            }

            fn mark_dirty(self: Arc<Self>, _range: MarkDirtyRange<Self>) {
                unreachable!();
            }

            fn on_zero_children(self: Arc<Self>) {
                unreachable!();
            }

            fn byte_size(&self) -> u64 {
                unreachable!();
            }
            async fn aligned_read(
                &self,
                _aligned_byte_range: std::ops::Range<u64>,
            ) -> Result<buffer::Buffer<'_>, Error> {
                unreachable!();
            }
        }

        let scope = ExecutionScope::new();
        let pager = Arc::new(Pager::new(scope.clone()).unwrap());
        let file = Arc::new_cyclic(|weak| {
            let (vmo, pager_packet_receiver_registration) =
                pager.create_vmo(weak.clone(), page_size(), zx::VmoOptions::empty()).unwrap();
            StatusCodeFile {
                vmo,
                pager: pager.clone(),
                status_code: Mutex::new(zx::Status::INTERNAL),
                pager_packet_receiver_registration,
            }
        });

        // Configures the failure code, triggers a page-in by reading, and checks the code the
        // kernel reports back to the reader.
        fn check_mapping(
            file: &StatusCodeFile,
            failure_code: zx::Status,
            expected_code: zx::Status,
        ) {
            {
                *file.status_code.lock() = failure_code;
            }
            let mut buf = [0u8; 8];
            assert_eq!(file.vmo().read(&mut buf, 0).unwrap_err(), expected_code);
        }
        check_mapping(&file, zx::Status::IO_DATA_INTEGRITY, zx::Status::IO_DATA_INTEGRITY);
        check_mapping(&file, zx::Status::NO_SPACE, zx::Status::NO_SPACE);
        check_mapping(&file, zx::Status::FILE_BIG, zx::Status::BUFFER_TOO_SMALL);
        check_mapping(&file, zx::Status::IO, zx::Status::IO);
        check_mapping(&file, zx::Status::IO_DATA_LOSS, zx::Status::IO);
        check_mapping(&file, zx::Status::NOT_EMPTY, zx::Status::BAD_STATE);
        check_mapping(&file, zx::Status::BAD_STATE, zx::Status::BAD_STATE);

        scope.wait().await;
    }
1255
    /// Verifies the modified flag reported by query_vmo_stats and its reset behaviour.
    #[fuchsia::test(threads = 2)]
    async fn test_query_vmo_stats() {
        let scope = ExecutionScope::new();
        let pager = Arc::new(Pager::new(scope.clone()).unwrap());
        let file = MockFile::new(pager.clone());

        // Untouched VMO: not modified.
        let stats = pager.query_vmo_stats(file.vmo(), PagerVmoStatsOptions::empty()).unwrap();
        assert!(!stats.was_vmo_modified());

        // A write sets the modified flag.
        file.vmo().write(&[0, 1, 2, 3, 4], 0).unwrap();
        let stats = pager.query_vmo_stats(file.vmo(), PagerVmoStatsOptions::empty()).unwrap();
        assert!(stats.was_vmo_modified());

        // Querying with RESET_VMO_STATS still reports modified, but clears the flag...
        let stats =
            pager.query_vmo_stats(file.vmo(), PagerVmoStatsOptions::RESET_VMO_STATS).unwrap();
        assert!(stats.was_vmo_modified());

        // ...so the next query reports not modified.
        let stats = pager.query_vmo_stats(file.vmo(), PagerVmoStatsOptions::empty()).unwrap();
        assert!(!stats.was_vmo_modified());

        scope.wait().await;
    }
1281
    /// Verifies query_dirty_ranges: entry contents, zero-range flagging, the remaining count when
    /// the caller's buffer is too small, and sub-range queries.
    #[fuchsia::test(threads = 2)]
    async fn test_query_dirty_ranges() {
        let scope = ExecutionScope::new();
        let pager = Arc::new(Pager::new(scope.clone()).unwrap());
        let file = MockFile::new_with_size_and_type(
            pager.clone(),
            page_size() + page_size() / 2,
            zx::VmoOptions::UNBOUNDED,
        );
        let mut buffer = vec![VmoDirtyRange::default(); 2];

        let page_size = page_size();
        assert_eq!(file.vmo().get_content_size().unwrap(), page_size + page_size / 2);

        // Nothing dirty initially.
        let (actual, remaining) =
            pager.query_dirty_ranges(file.vmo(), 0..page_size * 100, &mut buffer).unwrap();
        assert_eq!(actual, 0);
        assert_eq!(remaining, 0);

        // Growing the stream dirties the tail of the old last page and adds a zero range.
        file.vmo().set_stream_size(page_size * 7 + page_size / 2).unwrap();

        let (actual, remaining) =
            pager.query_dirty_ranges(file.vmo(), 0..page_size * 100, &mut buffer).unwrap();
        assert_eq!(actual, 2);
        assert_eq!(remaining, 0);
        assert_eq!(buffer[0].range(), page_size..page_size * 2);
        assert!(!buffer[0].is_zero_range());
        assert_eq!(buffer[1].range(), page_size * 2..page_size * 8);
        assert!(buffer[1].is_zero_range());

        // The grow paged in and dirtied the partially-valid page.
        assert_eq!(
            file.pager_requests(true),
            vec![
                PagerRequest::PageIn(page_size * 1..page_size * 2),
                PagerRequest::Dirty(page_size * 1..page_size * 2),
            ]
        );

        file.vmo().write(&[1, 2, 3, 4], page_size).unwrap();
        file.vmo().write(&[1, 2, 3, 4], page_size * 2).unwrap();
        file.vmo().write(&[1, 2, 3, 4], page_size * 4).unwrap();

        // Page 1 was already dirty, so only pages 2 and 4 generate dirty requests.
        assert_eq!(
            file.pager_requests(true),
            vec![
                PagerRequest::Dirty(page_size * 2..page_size * 3),
                PagerRequest::Dirty(page_size * 4..page_size * 5)
            ]
        );

        // Buffer holds 2 entries but 4 ranges exist: `remaining` reports the overflow.
        let (actual, remaining) =
            pager.query_dirty_ranges(file.vmo(), 0..page_size * 7, &mut buffer).unwrap();
        assert_eq!(actual, 2);
        assert_eq!(remaining, 2);
        assert_eq!(buffer[0].range(), page_size..(page_size * 3));
        assert!(!buffer[0].is_zero_range());
        assert_eq!(buffer[1].range(), (page_size * 3)..(page_size * 4));
        assert!(buffer[1].is_zero_range());

        // Querying the tail sub-range picks up where the previous query left off.
        let (actual, remaining) = pager
            .query_dirty_ranges(file.vmo(), page_size * 4..page_size * 7, &mut buffer)
            .unwrap();
        assert_eq!(actual, 2);
        assert_eq!(remaining, 0);
        assert_eq!(buffer[0].range(), (page_size * 4)..(page_size * 5));
        assert!(!buffer[0].is_zero_range());
        assert_eq!(buffer[1].range(), (page_size * 5)..(page_size * 7));
        assert!(buffer[1].is_zero_range());

        // Reading a zero range returns zeros without generating pager requests.
        let mut read_buf = vec![0u8; page_size as usize];
        file.vmo().read(&mut read_buf, page_size * 3).expect("read");
        let expected = vec![0u8; page_size as usize];
        assert_eq!(read_buf, expected);
        assert_eq!(file.pager_requests(true), vec![]);

        scope.wait().await;
    }
1380
    // Verifies that growing a stream zeroes the newly exposed bytes, even when
    // data had been written beyond the previous stream end.
    #[fuchsia::test(threads = 2)]
    async fn test_zero_grown_vmo() {
        let scope = ExecutionScope::new();
        let pager = Arc::new(Pager::new(scope.clone()).unwrap());
        let file = MockFile::new(pager.clone());

        // Fill two pages with 0xff so later zeroing is observable.
        let write_buf = vec![0xff; page_size() as usize * 2];
        file.vmo().set_stream_size(page_size() * 2).expect("grow");
        file.vmo().write(&write_buf, 0).expect("write");
        let mut read_buf = vec![0u8; page_size() as usize * 2];
        file.vmo().read(&mut read_buf, 0).expect("read");
        assert_eq!(read_buf, write_buf);

        // Shrink to one byte past the first page, write past the new stream end,
        // then grow to cover (part of) the written region.
        file.vmo().set_stream_size(page_size() + 1).expect("shrink");
        file.vmo().write(&[0xff; 3], page_size() + 2).expect("write after shrink");
        file.vmo().set_stream_size(page_size() + 4).expect("grow again");
        let mut read_buf = vec![0u8; page_size() as usize];
        file.vmo().read(&mut read_buf, page_size()).expect("read");
        // Only the byte retained within the stream (offset page_size) keeps its
        // 0xff value; bytes written beyond the old end were zeroed by the grow.
        let mut expected = vec![0u8; page_size() as usize];
        expected[0] = 0xff;
        assert_eq!(read_buf, expected);

        scope.wait().await;
    }
1409
1410 #[fuchsia::test]
1411 async fn test_pager_range_chunks_iter_chunks() {
1412 let scope = ExecutionScope::new();
1413 let pager = Arc::new(Pager::new(scope).unwrap());
1414 let file = MockFile::new(pager.clone());
1415
1416 let pager_range = PageInRange::new(0..page_size() * 5, file, Epoch::global().guard());
1417 let ranges: Vec<Range<u64>> = pager_range
1418 .chunks(page_size() * 2)
1419 .map(|pager_range| {
1420 let range = pager_range.range();
1421 pager_range.consume();
1422 range
1423 })
1424 .collect();
1425 assert_eq!(
1426 ranges,
1427 [
1428 0..page_size() * 2,
1429 page_size() * 2..page_size() * 4,
1430 page_size() * 4..page_size() * 5
1431 ]
1432 );
1433 }
1434
1435 #[fuchsia::test]
1436 async fn test_pager_range_split() {
1437 let scope = ExecutionScope::new();
1438 let pager = Arc::new(Pager::new(scope).unwrap());
1439 let file = MockFile::new(pager.clone());
1440
1441 let pager_range = PageInRange::new(0..page_size() * 10, file, Epoch::global().guard());
1442 let (left, right) = pager_range.split(page_size() * 5);
1443 let (left, right) = (left.unwrap(), right.unwrap());
1444 assert_eq!(left.range(), 0..page_size() * 5);
1445 assert_eq!(right.range(), page_size() * 5..page_size() * 10);
1446
1447 left.consume();
1448 right.consume();
1449 }
1450
    // `expand` requires the original range to be a subset of the new range;
    // violating that is a programming error and must panic with this message.
    #[fuchsia::test]
    #[should_panic(expected = "0..8192 is not a subset of 0..4096")]
    async fn test_pager_range_bad_expand_panics() {
        let scope = ExecutionScope::new();
        let pager = Arc::new(Pager::new(scope).unwrap());
        let file = MockFile::new(pager.clone());

        // Two-page range expanded to a single page: 0..8192 is not inside 0..4096.
        let pager_range = PageInRange::new(0..page_size() * 2, file, Epoch::global().guard());
        pager_range.expand(0..page_size()).consume();
    }
1461
    /// Test double for exercising `PageInRange`/`MarkDirtyRange`: pager
    /// callbacks are forwarded to the closures supplied at construction time.
    #[derive(ToWeakNode)]
    struct PagerRangeTestFile {
        vmo: zx::Vmo,
        pager_packet_receiver_registration: PagerPacketReceiverRegistration<Self>,
        pager: Pager,
        // Invoked from `PagerBacked::page_in`.
        page_in_fn: Box<dyn Fn(PageInRange<Self>) + Send + Sync + 'static>,
        // Invoked from `PagerBacked::mark_dirty`.
        mark_dirty_fn: Box<dyn Fn(MarkDirtyRange<Self>) + Send + Sync + 'static>,
    }
1470
1471 impl PagerRangeTestFile {
1472 fn new<
1473 F1: Fn(PageInRange<Self>) + Send + Sync + 'static,
1474 F2: Fn(MarkDirtyRange<Self>) + Send + Sync + 'static,
1475 >(
1476 page_in_fn: F1,
1477 mark_dirty_fn: F2,
1478 ) -> Arc<Self> {
1479 Arc::new_cyclic(|weak| {
1480 let pager = Pager::new(ExecutionScope::new()).unwrap();
1481 let (vmo, pager_packet_receiver_registration) = pager
1482 .create_vmo(weak.clone(), page_size() * 2, zx::VmoOptions::TRAP_DIRTY)
1483 .unwrap();
1484 Self {
1485 vmo,
1486 pager_packet_receiver_registration,
1487 pager,
1488 page_in_fn: Box::new(page_in_fn),
1489 mark_dirty_fn: Box::new(mark_dirty_fn),
1490 }
1491 })
1492 }
1493 }
1494
    // Minimal `FxNode` implementation: these tests only need a stable object id.
    // The remaining methods are not exercised and would panic via
    // `unimplemented!` if called.
    impl FxNode for PagerRangeTestFile {
        fn object_id(&self) -> u64 {
            // Arbitrary fixed id; the tests never compare ids between nodes.
            1
        }

        fn parent(&self) -> Option<Arc<crate::directory::FxDirectory>> {
            unimplemented!()
        }

        fn set_parent(&self, _parent: Arc<crate::directory::FxDirectory>) {
            unimplemented!()
        }

        fn open_count_add_one(&self) {
            unimplemented!()
        }

        fn open_count_sub_one(self: Arc<Self>) {
            unimplemented!()
        }

        fn object_descriptor(&self) -> fxfs::object_store::ObjectDescriptor {
            unimplemented!()
        }
    }
1520
    // `PagerBacked` implementation that forwards page-in and mark-dirty requests
    // to the closures held by the test file. `byte_size` and `aligned_read` are
    // not used by these tests and would panic via `unimplemented!` if called.
    impl PagerBacked for PagerRangeTestFile {
        fn pager(&self) -> &Pager {
            &self.pager
        }

        fn pager_packet_receiver_registration(&self) -> &PagerPacketReceiverRegistration<Self> {
            &self.pager_packet_receiver_registration
        }

        fn vmo(&self) -> &zx::Vmo {
            &self.vmo
        }

        // Delegate to the closure supplied at construction.
        fn page_in(self: Arc<Self>, range: PageInRange<Self>) {
            (self.page_in_fn)(range)
        }

        // Delegate to the closure supplied at construction.
        fn mark_dirty(self: Arc<Self>, range: MarkDirtyRange<Self>) {
            (self.mark_dirty_fn)(range)
        }

        fn on_zero_children(self: Arc<Self>) {}

        fn byte_size(&self) -> u64 {
            unimplemented!();
        }

        async fn aligned_read(
            &self,
            _range: std::ops::Range<u64>,
        ) -> Result<buffer::Buffer<'_>, Error> {
            unimplemented!();
        }
    }
1555
1556 fn real_supply_pages(range: PageInRange<PagerRangeTestFile>) {
1557 let aux_vmo = zx::Vmo::create(range.len()).unwrap();
1558 range.supply_pages(&aux_vmo, 0);
1559 }
1560
    // Default mark-dirty handler: acknowledges the dirty request; the result is
    // intentionally ignored.
    fn real_mark_dirty(range: MarkDirtyRange<PagerRangeTestFile>) {
        let _ = range.dirty_pages();
    }
1564
1565 #[fuchsia::test(threads = 2)]
1566 async fn test_page_in_range_supply_pages() {
1567 let file = PagerRangeTestFile::new(real_supply_pages, real_mark_dirty);
1568
1569 let mut data = vec![0; 20];
1570 file.vmo.read(&mut data, 0).unwrap();
1571 }
1572
1573 #[fuchsia::test(threads = 2)]
1574 async fn test_page_in_range_report_failure() {
1575 let file = PagerRangeTestFile::new(
1576 |range| {
1577 range.report_failure(zx::Status::IO_DATA_INTEGRITY);
1578 },
1579 real_mark_dirty,
1580 );
1581
1582 let mut data = vec![0; 20];
1583 let err = file.vmo.read(&mut data, 0).unwrap_err();
1584 assert_eq!(err, zx::Status::IO_DATA_INTEGRITY);
1585 }
1586
1587 #[cfg(debug_assertions)]
1588 #[fuchsia::test(threads = 2)]
1589 #[should_panic(expected = "PagerRange was dropped without sending a response")]
1590 async fn test_page_in_range_dropped() {
1591 let file = PagerRangeTestFile::new(|_| {}, real_mark_dirty);
1592
1593 let mut data = vec![0; 20];
1594 file.vmo.read(&mut data, 0).unwrap_err();
1595 }
1596
1597 #[cfg(not(debug_assertions))]
1598 #[fuchsia::test(threads = 2)]
1599 async fn test_page_in_range_dropped() {
1600 let file = PagerRangeTestFile::new(|_| {}, real_mark_dirty);
1601
1602 let mut data = vec![0; 20];
1603 let err = file.vmo.read(&mut data, 0).unwrap_err();
1604 assert_eq!(err, zx::Status::BAD_STATE);
1605 }
1606
1607 #[fuchsia::test(threads = 2)]
1608 async fn test_mark_dirty_range_dirty_pages() {
1609 let file = PagerRangeTestFile::new(real_supply_pages, real_mark_dirty);
1610
1611 let data = vec![5; 20];
1612 file.vmo.write(&data, 0).unwrap();
1613 }
1614
1615 #[fuchsia::test(threads = 2)]
1616 async fn test_mark_dirty_range_report_failure() {
1617 let file = PagerRangeTestFile::new(real_supply_pages, |range| {
1618 range.report_failure(zx::Status::IO_DATA_INTEGRITY);
1619 });
1620
1621 let data = vec![5; 20];
1622 let err = file.vmo.write(&data, 0).unwrap_err();
1623 assert_eq!(err, zx::Status::IO_DATA_INTEGRITY);
1624 }
1625
1626 #[cfg(debug_assertions)]
1627 #[fuchsia::test(threads = 2)]
1628 #[should_panic(expected = "PagerRange was dropped without sending a response")]
1629 async fn test_mark_dirty_range_dropped() {
1630 let file = PagerRangeTestFile::new(real_supply_pages, |_| {});
1631
1632 let data = vec![5; 20];
1633 file.vmo.write(&data, 0).unwrap_err();
1634 }
1635
1636 #[cfg(not(debug_assertions))]
1637 #[fuchsia::test(threads = 2)]
1638 async fn test_mark_dirty_range_dropped() {
1639 let file = PagerRangeTestFile::new(real_supply_pages, |_| {});
1640
1641 let data = vec![5; 20];
1642 let err = file.vmo.write(&data, 0).unwrap_err();
1643 assert_eq!(err, zx::Status::BAD_STATE);
1644 }
1645
1646 #[fuchsia::test(threads = 2)]
1647 async fn test_pager_range_chunks_iter_consumed() {
1648 let file = PagerRangeTestFile::new(
1649 |range| {
1650 let aux_vmo = zx::Vmo::create(page_size()).unwrap();
1651 range.expand(0..page_size() * 2).chunks(page_size()).for_each(|range| {
1652 range.supply_pages(&aux_vmo, 0);
1653 });
1654 },
1655 real_mark_dirty,
1656 );
1657
1658 let mut data = vec![0; 20];
1659 file.vmo.read(&mut data, 0).unwrap();
1660 }
1661
1662 fn partial_supply_pages(range: PageInRange<PagerRangeTestFile>) {
1663 let aux_vmo = zx::Vmo::create(page_size()).unwrap();
1664 range.expand(0..page_size() * 2).chunks(page_size()).take(1).for_each(|range| {
1667 range.supply_pages(&aux_vmo, 0);
1668 });
1669 }
1670
1671 #[cfg(debug_assertions)]
1672 #[fuchsia::test(threads = 2)]
1673 #[should_panic(expected = "PagerRangeChunksIter was dropped without being fully consumed")]
1674 async fn test_pager_range_chunks_iter_dropped() {
1675 let file = PagerRangeTestFile::new(partial_supply_pages, real_mark_dirty);
1676
1677 let mut data = vec![0; 20];
1678 file.vmo.read(&mut data, page_size()).unwrap_err();
1681 }
1682
1683 #[cfg(not(debug_assertions))]
1684 #[fuchsia::test(threads = 2)]
1685 async fn test_pager_range_chunks_iter_dropped() {
1686 let file = PagerRangeTestFile::new(partial_supply_pages, real_mark_dirty);
1687
1688 let mut data = vec![0; 20];
1689 let err = file.vmo.read(&mut data, page_size()).unwrap_err();
1692 assert_eq!(err, zx::Status::BAD_STATE);
1693 }
1694
    // Verifies that growing a file (via `set_size` on a RESIZABLE VMO and
    // `set_stream_size` on an UNBOUNDED one) exposes the new region as zeroes,
    // and checks the exact pager traffic generated at each step.
    #[fuchsia::test(threads = 2)]
    async fn test_grow_zeroes_new_bytes() {
        let scope = ExecutionScope::new();
        let pager = Arc::new(Pager::new(scope.clone()).unwrap());
        let page_size = page_size();
        let vmo_size: u64 = page_size * 2;
        // file_a is RESIZABLE, file_b is UNBOUNDED; both start at two pages and
        // are expected to behave identically below.
        let file_a =
            MockFile::new_with_size_and_type(pager.clone(), vmo_size, zx::VmoOptions::RESIZABLE);
        let file_b =
            MockFile::new_with_size_and_type(pager.clone(), vmo_size, zx::VmoOptions::UNBOUNDED);
        let mut buffer = vec![VmoDirtyRange::default(); 3];

        assert_eq!(file_a.vmo().get_stream_size().unwrap(), page_size * 2);
        assert_eq!(file_b.vmo().get_stream_size().unwrap(), page_size * 2);

        // Reading page 1 of each file triggers exactly one page-in request.
        let mut read_buf = vec![0u8; page_size as usize];
        file_a.vmo().read(&mut read_buf, page_size).expect("read a");
        assert_eq!(
            file_a.pager_requests(true),
            vec![PagerRequest::PageIn(page_size..page_size * 2)]
        );
        file_b.vmo().read(&mut read_buf, page_size).expect("read b");
        assert_eq!(
            file_b.pager_requests(true),
            vec![PagerRequest::PageIn(page_size..page_size * 2)]
        );

        // Grow both files from 2 to 8 pages.
        let vmo_size = page_size * 8;
        file_a.vmo().set_size(vmo_size).unwrap();
        file_b.vmo().set_stream_size(vmo_size).unwrap();

        // The newly added pages [2, 8) are reported as one range (options: 1).
        assert_eq!(
            pager.query_dirty_ranges(file_a.vmo(), 0..vmo_size, &mut buffer).unwrap(),
            (1, 0)
        );
        assert_eq!(
            buffer[0],
            VmoDirtyRange { offset: page_size * 2, length: page_size * 6, options: 1 },
        );
        assert_eq!(
            pager.query_dirty_ranges(file_b.vmo(), 0..vmo_size, &mut buffer).unwrap(),
            (1, 0)
        );
        assert_eq!(
            buffer[0],
            VmoDirtyRange { offset: page_size * 2, length: page_size * 6, options: 1 },
        );

        // Reading the grown region produces no pager traffic.
        let mut read_buf = vec![0u8; page_size as usize * 6];
        file_a.vmo().read(&mut read_buf, page_size * 2).expect("read a");
        assert_eq!(file_a.pager_requests(true), vec![]);
        file_b.vmo().read(&mut read_buf, page_size * 2).expect("read b");
        assert_eq!(file_b.pager_requests(true), vec![]);

        // "Growing" to the same size again changes nothing: same dirty ranges,
        // still no pager requests.
        let vmo_size = page_size * 8;
        file_a.vmo().set_size(vmo_size).unwrap();
        file_b.vmo().set_stream_size(vmo_size).unwrap();
        assert_eq!(
            pager.query_dirty_ranges(file_a.vmo(), 0..vmo_size, &mut buffer).unwrap(),
            (1, 0)
        );
        assert_eq!(
            buffer[0],
            VmoDirtyRange { offset: page_size * 2, length: page_size * 6, options: 1 },
        );
        assert_eq!(
            pager.query_dirty_ranges(file_b.vmo(), 0..vmo_size, &mut buffer).unwrap(),
            (1, 0)
        );
        assert_eq!(
            buffer[0],
            VmoDirtyRange { offset: page_size * 2, length: page_size * 6, options: 1 },
        );
        assert_eq!(file_a.pager_requests(true), vec![],);
        assert_eq!(file_b.pager_requests(true), vec![],);

        // Writing into the grown region generates a dirty request for that page.
        file_b.vmo().write(&[1; 10], page_size * 2).unwrap();
        assert_eq!(
            file_b.pager_requests(true),
            vec![PagerRequest::Dirty(page_size * 2..page_size * 3)],
        );

        // Shrink to 4 pages, then write at page 4 via a stream: the stream write
        // extends the file and dirties the page being written.
        let vmo_size = page_size * 4;
        file_b.vmo().set_stream_size(vmo_size).unwrap();
        let stream =
            zx::Stream::create(zx::StreamOptions::MODE_WRITE, file_b.vmo(), page_size * 4).unwrap();
        stream.write(zx::StreamWriteOptions::empty(), &vec![10; page_size as usize]).unwrap();
        assert_eq!(
            file_b.pager_requests(true),
            vec![PagerRequest::Dirty(page_size * 4..page_size * 5)],
        );

        // An append-mode stream write past the end behaves the same way.
        let stream = zx::Stream::create(
            zx::StreamOptions::MODE_WRITE | zx::StreamOptions::MODE_APPEND,
            file_b.vmo(),
            page_size * 5,
        )
        .unwrap();
        stream.write(zx::StreamWriteOptions::empty(), &[10; 1024]).unwrap();
        assert_eq!(
            file_b.pager_requests(true),
            vec![PagerRequest::Dirty(page_size * 5..page_size * 6)],
        );

        scope.wait().await;
    }
1812
1813 #[fuchsia::test(threads = 2)]
1814 async fn test_pathological_shrink_unbounded_vmo() {
1815 let scope = ExecutionScope::new();
1816 let pager = Arc::new(Pager::new(scope.clone()).unwrap());
1817 let page_size = page_size();
1818 let vmo_size: u64 = page_size * 25600; let file =
1820 MockFile::new_with_size_and_type(pager.clone(), vmo_size, zx::VmoOptions::UNBOUNDED);
1821 let mut buffer = vec![VmoDirtyRange::default(); 10];
1822
1823 assert_eq!(file.vmo().get_stream_size().unwrap(), vmo_size);
1824
1825 for i in 0..vmo_size / 256 {
1827 let data = vec![5; 20];
1828 file.vmo.write(&data, i * 256).expect("write failed");
1829 }
1830
1831 for i in (0..25600u64 / 1024).rev() {
1832 file.vmo().set_stream_size(i * 1024 + page_size / 2).unwrap();
1833 }
1834
1835 assert_eq!(pager.query_dirty_ranges(file.vmo(), 0..vmo_size, &mut buffer).unwrap(), (1, 0));
1836 assert_eq!(buffer[0..1], [VmoDirtyRange { offset: 0, length: page_size, options: 0 },]);
1837
1838 scope.wait().await;
1839 }
1840
    // Dirties every other page (leaving gaps) and then shrinks the stream in
    // alternating strides; after each shrink exactly one dirty tail page must
    // remain at the (rounded-down) new end offset.
    #[fuchsia::test(threads = 2)]
    async fn test_pathological_shrink_unbounded_vmo_with_gaps() {
        let scope = ExecutionScope::new();
        let pager = Arc::new(Pager::new(scope.clone()).unwrap());
        let page_size = page_size();
        let vmo_size: u64 = page_size * 25600; // 25600 pages (100 MiB with 4 KiB pages).
        let file =
            MockFile::new_with_size_and_type(pager.clone(), vmo_size, zx::VmoOptions::UNBOUNDED);
        let mut buffer = vec![VmoDirtyRange::default(); 10];

        assert_eq!(file.vmo().get_stream_size().unwrap(), vmo_size);

        // Dirty every other page so the dirty ranges are full of gaps.
        for offset in (0u64..vmo_size).step_by((page_size * 2) as usize) {
            let data = vec![5; 20];
            file.vmo.write(&data, offset).expect("write failed");
        }
        // Half of the pages should now be dirty.
        let (actual, remaining) =
            pager.query_dirty_ranges(file.vmo(), 0..vmo_size, &mut buffer).unwrap();
        assert_eq!(actual + remaining, 25600 / 2);

        // Alternating strides of 5 and 4 pages keep the (unaligned) end offset
        // landing in both dirty and clean pages across iterations.
        let mut offset = vmo_size.saturating_sub(5 * page_size - 2);
        'outer: loop {
            for delta in [5 * page_size, 4 * page_size] {
                file.vmo().set_stream_size(offset).unwrap();
                // Only the page containing the new end may remain dirty beyond it.
                assert_eq!(
                    pager.query_dirty_ranges(file.vmo(), offset..vmo_size, &mut buffer).unwrap(),
                    (1, 0)
                );
                assert_eq!(
                    buffer[0..1],
                    [VmoDirtyRange {
                        offset: round_down(offset, page_size),
                        length: page_size,
                        options: 0
                    },]
                );
                offset = offset.saturating_sub(delta);
                if offset == 0 {
                    break 'outer;
                }
            }
        }

        scope.wait().await;
    }
1892
    // Verifies growth of an UNBOUNDED-backed file starting from a sub-page
    // stream size: bytes written beyond the old end are zeroed by a grow, and
    // growing past a page boundary reports the expected dirty ranges.
    #[fuchsia::test(threads = 2)]
    async fn test_grow_unbounded_vmo() {
        let scope = ExecutionScope::new();
        let pager = Arc::new(Pager::new(scope.clone()).unwrap());
        // Initial stream size of 128 bytes — a fraction of a page.
        let file = MockFile::new_with_size_and_type(pager.clone(), 128, zx::VmoOptions::UNBOUNDED);

        // Write past the current stream end, then grow over the written region.
        let data = vec![1; 128];
        file.vmo().write(&data, 128).expect("write failed");
        file.vmo().set_stream_size(256).unwrap();
        assert_eq!(file.vmo().get_stream_size().expect("get_stream_size"), 256);

        // The grow must have zeroed the bytes that were written beyond the end.
        let mut data = vec![0xff; 256];
        file.vmo().read(&mut data, 0).expect("read");
        let expected = vec![0; 256];
        assert_eq!(data, expected);

        // Growing past the first page yields two ranges: page 0 (modified) and
        // the newly added pages [1, 3).
        file.vmo().set_stream_size(page_size() * 3).unwrap();
        let mut buffer = vec![VmoDirtyRange::default(); 10];
        assert_eq!(
            pager.query_dirty_ranges(file.vmo(), 0..page_size() * 3, &mut buffer).unwrap(),
            (2, 0)
        );
        assert_eq!(
            buffer[0..2],
            [
                VmoDirtyRange { offset: 0, length: page_size(), options: 0 },
                VmoDirtyRange { offset: page_size(), length: page_size() * 2, options: 1 },
            ]
        );

        scope.wait().await;
    }
1928}