1use crate::fuchsia::errors::map_to_status;
6use crate::fuchsia::node::FxNode;
7use crate::fuchsia::profile::Recorder;
8use anyhow::Error;
9use bitflags::bitflags;
10use fuchsia_async::epoch::{Epoch, EpochGuard};
11use fuchsia_async::{self as fasync};
12use fuchsia_sync::{Mutex, MutexGuard};
13use fxfs::future_with_guard::FutureWithGuard;
14use fxfs::log::*;
15use fxfs::range::RangeExt;
16use fxfs::round::{round_down, round_up};
17use std::future::Future;
18use std::marker::PhantomData;
19use std::mem::MaybeUninit;
20use std::ops::{Deref, Range};
21use std::sync::atomic::{AtomicU64, Ordering};
22use std::sync::{Arc, Weak};
23use storage_device::buffer;
24use vfs::execution_scope::ExecutionScope;
25use zx::sys::zx_page_request_command_t::{ZX_PAGER_VMO_DIRTY, ZX_PAGER_VMO_READ};
26use zx::{PacketContents, PagerPacket, SignalPacket};
27
/// Count of files currently held via a strong reference while watching for the VMO's
/// zero-children signal (incremented in `Pager::watch_for_zero_children`, decremented when the
/// holder is downgraded back to a weak reference).
pub static STRONG_FILE_REFS: AtomicU64 = AtomicU64::new(0);
29
30fn watch_for_zero_children(file: &impl PagerBacked) -> Result<(), zx::Status> {
31 file.vmo().wait_async(
32 file.pager().executor.port(),
33 file.pager_packet_receiver_registration().key(),
34 zx::Signals::VMO_ZERO_CHILDREN,
35 zx::WaitAsyncOpts::empty(),
36 )
37}
38
/// Registration of a `PagerPacketReceiver` with the executor, used to route port packets for a
/// single pager-backed file.
pub type PagerPacketReceiverRegistration<T> = fasync::ReceiverRegistration<PagerPacketReceiver<T>>;

/// Receives pager packets (page-in / dirty requests) and zero-children signal packets for one
/// pager-backed file.
pub struct PagerPacketReceiver<T> {
    // Holds either a strong or a weak reference to the file. A strong reference is held while
    // watching for the VMO's zero-children signal; see `FileHolder`.
    file: Mutex<FileHolder<T>>,
}

/// Holds the `PagerPacketReceiver`'s internal lock; returned by
/// `PagerPacketReceiver::set_receiver`.
pub struct PagerPacketReceiverLock<'a, T> {
    _guard: MutexGuard<'a, FileHolder<T>>,
    // True if the holder contained a strong reference at the time the lock was taken.
    strong: bool,
}
51
impl<T> PagerPacketReceiverLock<'_, T> {
    /// Returns true if the receiver was holding a strong reference to the file when this lock
    /// was created (i.e. a zero-children watch was active).
    pub fn is_strong(&self) -> bool {
        self.strong
    }
}
58
impl<T: PagerBacked> PagerPacketReceiver<T> {
    /// Cancels the effect of a prior `Pager::watch_for_zero_children`: if a strong reference
    /// is held, it is downgraded to a weak one and `on_zero_children` is invoked on the file.
    /// No-op if only a weak reference is held.
    pub fn stop_watching_for_zero_children(&self) {
        let mut file = self.file.lock();
        if let FileHolder::Strong(strong) = &*file {
            // Build the replacement first, then swap it in and recover the owned Arc.
            let weak = FileHolder::Weak(Arc::downgrade(&strong));
            let FileHolder::Strong(strong) = std::mem::replace(&mut *file, weak) else {
                unreachable!();
            };
            STRONG_FILE_REFS.fetch_sub(1, Ordering::Relaxed);
            strong.on_zero_children();
        }
    }

    /// Replaces the file this receiver points at, preserving the current strength
    /// (strong stays strong, weak stays weak). Returns a lock that keeps the holder pinned
    /// until dropped; callers can query `is_strong` on it.
    pub fn set_receiver(&self, new_receiver: &Arc<T>) -> PagerPacketReceiverLock<'_, T> {
        let mut receiver_lock = self.file.lock();
        let strong = match &mut *receiver_lock {
            FileHolder::Strong(arc) => {
                *arc = new_receiver.clone();
                true
            }
            FileHolder::Weak(arc) => {
                *arc = Arc::downgrade(new_receiver);
                false
            }
        };
        PagerPacketReceiverLock { _guard: receiver_lock, strong }
    }

    /// Handles a pager packet: dispatches `ZX_PAGER_VMO_READ` to `page_in` and
    /// `ZX_PAGER_VMO_DIRTY` to `mark_dirty`; any other command is ignored.
    fn receive_pager_packet(&self, contents: PagerPacket) {
        let command = contents.command();
        if command != ZX_PAGER_VMO_READ && command != ZX_PAGER_VMO_DIRTY {
            return;
        }

        // Resolve a strong Arc to the file and (for reads) grab an epoch guard, inside a
        // scope so the `file` mutex is released before calling into the file.
        let (file, epoch_guard) = {
            let file_lock = self.file.lock();
            let file = match &*file_lock {
                FileHolder::Strong(file) => file.clone(),
                FileHolder::Weak(file) => {
                    if let Some(file) = file.upgrade() {
                        file
                    } else {
                        error!("Received a page request for a file that is closed {:?}", contents);
                        return;
                    }
                }
            };

            // The epoch guard lives inside the resulting PageInRange and is released once the
            // request has been responded to (used by `Pager::page_in_barrier`).
            let epoch_guard = match command {
                ZX_PAGER_VMO_READ => Some(Epoch::global().guard()),
                _ => None,
            };
            (file, epoch_guard)
        };

        // If the execution scope is shutting down, the request cannot be serviced; tell the
        // kernel so the faulting thread is unblocked with an error.
        let Some(_scope_guard) = file.pager().scope.try_active_guard() else {
            file.pager().report_failure(file.vmo(), contents.range(), zx::Status::BAD_STATE);
            return;
        };
        match command {
            ZX_PAGER_VMO_READ => {
                file.clone().page_in(PageInRange::new(contents.range(), file, epoch_guard.unwrap()))
            }
            ZX_PAGER_VMO_DIRTY => {
                file.clone().mark_dirty(MarkDirtyRange::new(contents.range(), file))
            }
            _ => unreachable!("Unhandled commands are filtered above"),
        }
    }

    /// Handles the VMO_ZERO_CHILDREN signal armed by `watch_for_zero_children`. If the VMO
    /// truly has no children, downgrades the strong reference and notifies the file;
    /// otherwise (a child appeared again before we got here) re-arms the watch.
    fn receive_signal_packet(&self, signals: SignalPacket) {
        assert!(signals.observed().contains(zx::Signals::VMO_ZERO_CHILDREN));

        let mut file = self.file.lock();
        if let FileHolder::Strong(strong) = &*file {
            // During shutdown the notification is dropped; the strong reference is released
            // elsewhere as part of teardown.
            let Some(_guard) = strong.pager().scope.try_active_guard() else {
                info!("Ignoring zero-children notification due to shutting down");
                return;
            };
            match strong.vmo().info() {
                Ok(info) => {
                    if info.num_children == 0 {
                        let weak = FileHolder::Weak(Arc::downgrade(&strong));
                        let FileHolder::Strong(strong) = std::mem::replace(&mut *file, weak) else {
                            unreachable!();
                        };
                        STRONG_FILE_REFS.fetch_sub(1, Ordering::Relaxed);
                        strong.on_zero_children();
                    } else {
                        // The signal raced with a new child; keep the strong reference and
                        // wait for the next zero-children edge.
                        watch_for_zero_children(strong.as_ref()).unwrap();
                    }
                }
                Err(e) => error!(error:? = e; "Vmo::info failed"),
            }
        }
    }
}
184
185impl<T: PagerBacked> fasync::PacketReceiver for PagerPacketReceiver<T> {
186 fn receive_packet(&self, packet: zx::Packet) {
187 match packet.contents() {
188 PacketContents::Pager(contents) => {
189 self.receive_pager_packet(contents);
190 }
191 PacketContents::SignalOne(signals) => {
192 self.receive_signal_packet(signals);
193 }
194 _ => unreachable!(), }
196 }
197}
198
/// Wraps a kernel `zx::Pager` together with the state needed to service its requests.
pub struct Pager {
    pager: zx::Pager,
    // Requests are only serviced while this scope is active; otherwise they fail BAD_STATE.
    scope: ExecutionScope,
    executor: fasync::EHandle,
    // Optional profile recorder; dropped if a record call fails (see `record_page_in`).
    recorder: Mutex<Option<Box<dyn Recorder>>>,
}

/// Either a strong or a weak reference to a file. A strong reference is held while watching
/// for the VMO's zero-children signal so the file stays alive until the signal fires.
enum FileHolder<T> {
    Strong(Arc<T>),
    Weak(Weak<T>),
}
214
impl Pager {
    /// Creates a new pager whose packets are delivered to the current executor's port and
    /// whose requests are only serviced while `scope` is active.
    pub fn new(scope: ExecutionScope) -> Result<Self, Error> {
        Ok(Pager {
            pager: zx::Pager::create(zx::PagerOptions::empty())?,
            scope,
            executor: fasync::EHandle::local(),
            recorder: Mutex::new(None),
        })
    }

    /// Spawns `task` on the executor, holding an active guard on the scope for the task's
    /// lifetime. Silently drops the task if the scope is already shutting down.
    fn spawn(&self, task: impl Future<Output = ()> + Send + 'static) {
        if let Some(guard) = self.scope.try_active_guard() {
            self.executor.spawn_detached(FutureWithGuard::new(guard, task));
        }
    }

    /// Installs (or clears, with `None`) the profile recorder; the previous recorder, if any,
    /// is dropped.
    pub fn set_recorder(&self, recorder: Option<Box<dyn Recorder>>) {
        let _old = std::mem::replace(&mut (*self.recorder.lock()), recorder);
    }

    /// Returns the current recorder holder, behind its lock.
    pub fn recorder(&self) -> MutexGuard<'_, Option<Box<dyn Recorder>>> {
        self.recorder.lock()
    }

    /// Records a page-in event for profiling. If recording fails, the recorder is dropped so
    /// subsequent calls become no-ops.
    pub fn record_page_in<P: PagerBacked>(&self, node: Arc<P>, range: Range<u64>) {
        let mut recorder_holder = self.recorder.lock();
        if let Some(recorder) = &mut (*recorder_holder) {
            if let Err(_) = recorder.record(node, range.start) {
                *recorder_holder = None;
            }
        }
    }

    /// Creates a pager-backed VMO of `initial_size` for `file` and registers a packet
    /// receiver for it. The receiver starts out holding only the weak reference passed in.
    pub fn create_vmo<T: PagerBacked>(
        &self,
        file: Weak<T>,
        initial_size: u64,
        vmo_options: zx::VmoOptions,
    ) -> Result<(zx::Vmo, PagerPacketReceiverRegistration<T>), Error> {
        let registration = self
            .executor
            .register_receiver(PagerPacketReceiver { file: Mutex::new(FileHolder::Weak(file)) });
        Ok((
            self.pager.create_vmo(
                vmo_options,
                self.executor.port(),
                registration.key(),
                initial_size,
            )?,
            registration,
        ))
    }

    /// Starts watching `file`'s VMO for the zero-children signal, upgrading the receiver's
    /// reference to a strong one so the file stays alive until the signal is handled.
    /// Returns `Ok(true)` if a new watch was set up, `Ok(false)` if one was already active.
    pub fn watch_for_zero_children(&self, file: &impl PagerBacked) -> Result<bool, Error> {
        let mut file = file.pager_packet_receiver_registration().file.lock();

        match &*file {
            FileHolder::Weak(weak) => {
                // The caller necessarily holds a strong reference to the file here, so the
                // upgrade is expected to succeed.
                let strong = weak.upgrade().unwrap();

                watch_for_zero_children(strong.as_ref())?;

                STRONG_FILE_REFS.fetch_add(1, Ordering::Relaxed);
                *file = FileHolder::Strong(strong);
                Ok(true)
            }
            FileHolder::Strong(_) => Ok(false),
        }
    }

    /// Supplies pages to fulfil a page-in request; errors are logged rather than propagated
    /// since there is no caller to recover.
    fn supply_pages(
        &self,
        vmo: &zx::Vmo,
        range: Range<u64>,
        transfer_vmo: &zx::Vmo,
        transfer_offset: u64,
    ) {
        if let Err(e) = self.pager.supply_pages(vmo, range, transfer_vmo, transfer_offset) {
            error!(error:? = e; "supply_pages failed");
        }
    }

    /// Fails a pager request, first mapping `status` onto the restricted set of statuses the
    /// kernel accepts for `ZX_PAGER_OP_FAIL`; anything unrecognized becomes BAD_STATE.
    fn report_failure(&self, vmo: &zx::Vmo, range: Range<u64>, status: zx::Status) {
        let pager_status = match status {
            zx::Status::IO_DATA_INTEGRITY => zx::Status::IO_DATA_INTEGRITY,
            zx::Status::NO_SPACE => zx::Status::NO_SPACE,
            zx::Status::FILE_BIG => zx::Status::BUFFER_TOO_SMALL,
            zx::Status::IO
            | zx::Status::IO_DATA_LOSS
            | zx::Status::IO_INVALID
            | zx::Status::IO_MISSED_DEADLINE
            | zx::Status::IO_NOT_PRESENT
            | zx::Status::IO_OVERRUN
            | zx::Status::IO_REFUSED
            | zx::Status::PEER_CLOSED => zx::Status::IO,
            _ => zx::Status::BAD_STATE,
        };
        if let Err(e) = self.pager.op_range(zx::PagerOp::Fail(pager_status), vmo, range) {
            error!(error:? = e; "op_range failed");
        }
    }

    /// Marks `range` dirty in response to a dirty request. NOT_FOUND is tolerated silently —
    /// presumably the pages were decommitted/truncated in the meantime; TODO confirm.
    fn dirty_pages(&self, vmo: &zx::Vmo, range: Range<u64>) {
        if let Err(e) = self.pager.op_range(zx::PagerOp::Dirty, vmo, range) {
            if e != zx::Status::NOT_FOUND {
                error!(error:? = e; "dirty_pages failed");
            }
        }
    }

    /// Begins writeback for `range` (transitions dirty pages towards clean).
    pub fn writeback_begin(
        &self,
        vmo: &zx::Vmo,
        range: Range<u64>,
        options: zx::PagerWritebackBeginOptions,
    ) {
        if let Err(e) = self.pager.op_range(zx::PagerOp::WritebackBegin(options), vmo, range) {
            error!(error:? = e; "writeback_begin failed");
        }
    }

    /// Completes a writeback previously started with `writeback_begin`.
    pub fn writeback_end(&self, vmo: &zx::Vmo, range: Range<u64>) {
        if let Err(e) = self.pager.op_range(zx::PagerOp::WritebackEnd, vmo, range) {
            error!(error:? = e; "writeback_end failed");
        }
    }

    /// Queries the dirty ranges of `vmo` within `range`, filling `buffer`. Returns
    /// `(written, not_yet_returned)`: the number of entries written into `buffer` and how
    /// many more dirty ranges remain beyond those.
    pub fn query_dirty_ranges(
        &self,
        vmo: &zx::Vmo,
        range: Range<u64>,
        buffer: &mut [VmoDirtyRange],
    ) -> Result<(usize, usize), zx::Status> {
        let mut actual = 0;
        let mut avail = 0;
        // SAFETY: `buffer` is a valid writable region for the duration of the call and the
        // byte length passed is exactly `size_of_val(buffer)`; `actual` and `avail` are valid
        // out-pointers to locals.
        let status = unsafe {
            zx::sys::zx_pager_query_dirty_ranges(
                self.pager.raw_handle(),
                vmo.raw_handle(),
                range.start,
                range.end - range.start,
                buffer.as_mut_ptr() as *mut u8,
                std::mem::size_of_val(buffer),
                &mut actual as *mut usize,
                &mut avail as *mut usize,
            )
        };
        zx::ok(status).map(|_| (actual, avail - actual))
    }

    /// Queries (and optionally resets, via `RESET_VMO_STATS`) the pager stats of `vmo`.
    pub fn query_vmo_stats(
        &self,
        vmo: &zx::Vmo,
        options: PagerVmoStatsOptions,
    ) -> Result<PagerVmoStats, zx::Status> {
        // Mirrors the kernel's zx_pager_vmo_stats_t layout for the syscall below.
        #[repr(C)]
        #[derive(Default)]
        struct zx_pager_vmo_stats {
            pub modified: u32,
        }
        const ZX_PAGER_VMO_STATS_MODIFIED: u32 = 1;
        let mut vmo_stats = MaybeUninit::<zx_pager_vmo_stats>::uninit();
        // SAFETY: the out-pointer refers to a live `MaybeUninit` whose size matches the byte
        // length passed; the kernel fills it in on success, making `assume_init` sound.
        let status = unsafe {
            zx::sys::zx_pager_query_vmo_stats(
                self.pager.raw_handle(),
                vmo.raw_handle(),
                options.bits(),
                vmo_stats.as_mut_ptr() as *mut u8,
                std::mem::size_of::<zx_pager_vmo_stats>(),
            )
        };
        zx::ok(status)?;
        let vmo_stats = unsafe { vmo_stats.assume_init() };
        Ok(PagerVmoStats { was_vmo_modified: vmo_stats.modified == ZX_PAGER_VMO_STATS_MODIFIED })
    }

    /// Waits until all page-in requests that were in flight when this was called have
    /// completed (their epoch guards, taken in `receive_pager_packet`, have been released).
    pub async fn page_in_barrier() {
        Epoch::global().barrier().await;
    }
}
434
/// Implemented by files whose VMOs are backed by this pager.
pub trait PagerBacked: FxNode + Sync + Send + Sized + 'static {
    /// The pager servicing this file's VMO.
    fn pager(&self) -> &Pager;

    /// The registration returned from `Pager::create_vmo` for this file.
    fn pager_packet_receiver_registration(&self) -> &PagerPacketReceiverRegistration<Self>;

    /// The pager-backed VMO.
    fn vmo(&self) -> &zx::Vmo;

    /// Called on a ZX_PAGER_VMO_READ request; the implementation must eventually call
    /// `supply_pages` or `report_failure` on `range` (enforced by `PagerRange`'s Drop).
    fn page_in(self: Arc<Self>, range: PageInRange<Self>);

    /// Called on a ZX_PAGER_VMO_DIRTY request; the implementation must eventually call
    /// `dirty_pages` or `report_failure` on `range` (enforced by `PagerRange`'s Drop).
    fn mark_dirty(self: Arc<Self>, range: MarkDirtyRange<Self>);

    /// Called when the VMO's last child goes away while a zero-children watch is active.
    fn on_zero_children(self: Arc<Self>);

    /// The file size in bytes; `default_page_in` zero-fills beyond the page-aligned size.
    fn byte_size(&self) -> u64;

    /// Reads the given page-aligned byte range from storage; used by `default_page_in`.
    fn aligned_read(
        &self,
        aligned_byte_range: std::ops::Range<u64>,
    ) -> impl Future<Output = Result<buffer::Buffer<'_>, Error>> + Send;
}
474
/// Default `page_in` implementation: zero-fills any part of the request beyond the file's
/// page-aligned size, expands the rest for read-ahead, and spawns one read task per
/// `read_ahead_size` chunk.
pub fn default_page_in<P: PagerBacked>(
    this: Arc<P>,
    pager_range: PageInRange<P>,
    read_ahead_size: u64,
) {
    fxfs_trace::duration!(
        "start-page-in",
        "offset" => pager_range.start(),
        "len" => pager_range.len()
    );

    // A shared 1 MiB VMO of zeroes used as the transfer source for zero-filled pages.
    const ZERO_VMO_SIZE: u64 = 1_048_576;
    static ZERO_VMO: std::sync::LazyLock<zx::Vmo> =
        std::sync::LazyLock::new(|| zx::Vmo::create(ZERO_VMO_SIZE).unwrap());

    assert!(pager_range.end() < i64::MAX as u64);

    let page_aligned_size = round_up(this.byte_size(), page_size()).unwrap();

    // Everything past the page-aligned EOF is supplied from the zero VMO.
    let (read_range, zero_range) = pager_range.split(page_aligned_size);
    if let Some(zero_range) = zero_range {
        for range in zero_range.chunks(ZERO_VMO_SIZE) {
            range.supply_pages(&ZERO_VMO, 0);
        }
    }

    if let Some(read_range) = read_range {
        // Expand to read-ahead-aligned boundaries, clamped to the end of the file.
        let expanded_range_for_readahead = round_down(read_range.start(), read_ahead_size)
            ..std::cmp::min(
                round_up(read_range.end(), read_ahead_size).unwrap(),
                page_aligned_size,
            );
        let read_range = read_range.expand(expanded_range_for_readahead);
        for range in read_range.chunks(read_ahead_size) {
            this.pager().record_page_in(this.clone(), range.range.clone());

            // Each chunk is read asynchronously; the chunk owns its PagerRange and must
            // respond (supply or fail) before being dropped.
            this.pager().spawn(page_in_chunk(this.clone(), range));
        }
    }
}
541
/// Reads one chunk of a page-in request from storage and supplies the pages, or reports a
/// failure (with the error mapped to a pager status) if the read fails.
#[fxfs_trace::trace("offset" => read_range.start(), "len" => read_range.len())]
async fn page_in_chunk<P: PagerBacked>(this: Arc<P>, read_range: PageInRange<P>) {
    let buffer = match this.aligned_read(read_range.range()).await {
        Ok(v) => v,
        Err(error) => {
            error!(range:? = read_range.range(), error:?; "Failed to load range");
            read_range.report_failure(map_to_status(error));
            return;
        }
    };
    // The whole chunk must be covered; supplying fewer pages than requested would leave the
    // faulting thread blocked.
    assert!(
        buffer.len() as u64 >= read_range.len(),
        "A buffer smaller than requested was returned. requested: {}, returned: {}",
        read_range.len(),
        buffer.len()
    );
    read_range.supply_pages(buffer.allocator().buffer_source().vmo(), buffer.range().start as u64);
}
560
/// One dirty-range entry as filled in by `zx_pager_query_dirty_ranges`; `#[repr(C)]` so a
/// slice of these can be passed directly as the syscall's out-buffer.
#[repr(C)]
#[derive(Debug, Copy, Clone, Default, PartialEq, Eq)]
pub struct VmoDirtyRange {
    offset: u64,
    length: u64,
    options: u64,
}
569
570impl VmoDirtyRange {
571 pub fn range(&self) -> Range<u64> {
573 self.offset..(self.offset + self.length)
574 }
575
576 pub fn is_zero_range(&self) -> bool {
578 self.options & zx::sys::ZX_VMO_DIRTY_RANGE_IS_ZERO != 0
579 }
580}
581
bitflags! {
    /// Options accepted by `Pager::query_vmo_stats`.
    #[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
    #[repr(transparent)]
    pub struct PagerVmoStatsOptions: u32 {
        /// Resets the stats as part of the query.
        const RESET_VMO_STATS = 1;
    }
}

/// Result of `Pager::query_vmo_stats`.
#[derive(Debug)]
pub struct PagerVmoStats {
    was_vmo_modified: bool,
}

impl PagerVmoStats {
    /// True if the kernel reported the VMO as modified (since the last reset, per the
    /// RESET_VMO_STATS semantics).
    pub fn was_vmo_modified(&self) -> bool {
        self.was_vmo_modified
    }
}
604
605#[inline]
606fn page_size() -> u64 {
607 zx::system_get_page_size().into()
608}
609
/// Marker trait naming the kind of pager request a `PagerRange` belongs to; used only for
/// diagnostics when a range is dropped without a response.
pub trait PagerRequestType {
    /// Human-readable name of the request type.
    fn request_type_name() -> &'static str;
}

/// Marker type for page-in (ZX_PAGER_VMO_READ) requests.
pub struct PageInRequest;

impl PagerRequestType for PageInRequest {
    fn request_type_name() -> &'static str {
        "PageInRequest"
    }
}
624
/// The range of a page-in request. Must be consumed via `supply_pages`, `report_failure`, or
/// split/chunk operations; dropping it unanswered trips the check in `PagerRange`'s Drop.
pub type PageInRange<T> = PagerRange<T, PageInRequest>;

impl<T: PagerBacked> PageInRange<T> {
    /// Creates a page-in range for `file`. `range` must be page aligned. The epoch guard is
    /// held until the request is responded to (see `Pager::page_in_barrier`).
    pub fn new(range: Range<u64>, file: Arc<T>, epoch_guard: EpochGuard<'static>) -> Self {
        debug_assert!(
            range.start % page_size() == 0 && range.end % page_size() == 0,
            "{:?} is not page aligned",
            range
        );
        Self {
            range,
            inner: Some(PagerRangeInner { file, _epoch_guard: Some(epoch_guard) }),
            _request_type: PhantomData,
        }
    }

    /// Fulfils this range by supplying pages from `transfer_vmo` starting at
    /// `transfer_offset`, consuming the range.
    pub fn supply_pages(mut self, transfer_vmo: &zx::Vmo, transfer_offset: u64) {
        // Taking `inner` marks the range as answered so Drop doesn't flag it.
        let inner = self.inner.take().unwrap();
        inner.file.pager().supply_pages(
            inner.file.vmo(),
            self.range.clone(),
            transfer_vmo,
            transfer_offset,
        );
    }
}
656
/// Marker type for mark-dirty (ZX_PAGER_VMO_DIRTY) requests.
#[derive(Debug)]
pub struct MarkDirtyRequest;

impl PagerRequestType for MarkDirtyRequest {
    fn request_type_name() -> &'static str {
        "MarkDirtyRequest"
    }
}

/// The range of a mark-dirty request. Must be consumed via `dirty_pages`, `report_failure`,
/// or split/chunk operations; dropping it unanswered trips the check in `PagerRange`'s Drop.
pub type MarkDirtyRange<T> = PagerRange<T, MarkDirtyRequest>;

impl<T: PagerBacked> MarkDirtyRange<T> {
    /// Creates a mark-dirty range for `file`. `range` must be page aligned. Unlike page-in,
    /// no epoch guard is held.
    pub fn new(range: Range<u64>, file: Arc<T>) -> Self {
        debug_assert!(
            range.start % page_size() == 0 && range.end % page_size() == 0,
            "{:?} is not page aligned",
            range
        );
        Self {
            range,
            inner: Some(PagerRangeInner { file, _epoch_guard: None }),
            _request_type: PhantomData,
        }
    }

    /// Fulfils this range by marking the pages dirty, consuming the range.
    pub fn dirty_pages(mut self) {
        // Taking `inner` marks the range as answered so Drop doesn't flag it.
        let inner = self.inner.take().unwrap();
        inner.file.pager().dirty_pages(inner.file.vmo(), self.range.clone());
    }
}
693
694#[derive(Clone)]
695struct PagerRangeInner<T: std::clone::Clone + Deref<Target: PagerBacked>> {
696 file: T,
699
700 _epoch_guard: Option<EpochGuard<'static>>,
703}
704
705pub struct PagerRange<T: PagerBacked, U: PagerRequestType> {
708 range: Range<u64>,
709
710 inner: Option<PagerRangeInner<Arc<T>>>,
712
713 _request_type: PhantomData<U>,
714}
715
impl<T: PagerBacked, U: PagerRequestType> PagerRange<T, U> {
    /// Splits this range at `split_point`, returning the part before it and the part at or
    /// after it (either may be `None` if empty). Both halves share `inner`, so each must
    /// still be responded to individually.
    pub fn split(mut self, split_point: u64) -> (Option<Self>, Option<Self>) {
        // Take `inner` up front so dropping `self` at the end of this method is quiet.
        let inner = self.inner.take().unwrap();
        let (left, right) = self.range.clone().split(split_point);
        let right = right.map(|range| Self {
            range,
            inner: Some(inner.clone()),
            _request_type: PhantomData,
        });
        let left = left.map(|range| Self { range, inner: Some(inner), _request_type: PhantomData });
        (left, right)
    }

    /// Grows this range to `new_range`, which must be a page-aligned superset of the current
    /// range (used for read-ahead expansion).
    pub fn expand(mut self, new_range: Range<u64>) -> Self {
        assert!(
            self.range.start >= new_range.start && self.range.end <= new_range.end,
            "{:?} is not a subset of {:?}",
            self.range,
            new_range
        );
        debug_assert!(
            new_range.start % page_size() == 0 && new_range.end % page_size() == 0,
            "{:?} is not page aligned",
            new_range
        );
        self.range = new_range;
        self
    }

    /// Converts this range into an iterator of `chunk_size`-sized sub-ranges (the last chunk
    /// may be short). `chunk_size` must be a multiple of the page size.
    pub fn chunks(mut self, chunk_size: u64) -> PagerRangeChunksIter<T, U> {
        debug_assert!(
            chunk_size % page_size() == 0,
            "{} is not a multiple of the page size",
            chunk_size
        );
        // `inner` moves into the iterator; this range's Drop then has nothing to flag.
        PagerRangeChunksIter {
            start: self.range.start,
            end: self.range.end,
            chunk_size: chunk_size,
            inner: self.inner.take(),
            _request_type: PhantomData,
        }
    }

    /// The start of the range.
    #[inline]
    pub fn start(&self) -> u64 {
        self.range.start
    }

    /// The end of the range (exclusive).
    #[inline]
    pub fn end(&self) -> u64 {
        self.range.end
    }

    /// The length of the range in bytes.
    #[inline]
    pub fn len(&self) -> u64 {
        self.range.end - self.range.start
    }

    /// A copy of the underlying `Range`.
    #[inline]
    pub fn range(&self) -> Range<u64> {
        self.range.clone()
    }

    /// Fails this request with `status` (mapped by `Pager::report_failure` onto the statuses
    /// the kernel accepts), consuming the range.
    pub fn report_failure(mut self, status: zx::Status) {
        let inner = self.inner.take().unwrap();
        inner.file.pager().report_failure(inner.file.vmo(), self.range.clone(), status);
    }

    /// Test-only: consumes the range without sending any response to the kernel.
    #[cfg(test)]
    fn consume(mut self) {
        self.inner.take().unwrap();
    }
}
802
impl<T: PagerBacked, U: PagerRequestType> Drop for PagerRange<T, U> {
    /// A range dropped while `inner` is still `Some` was never responded to: panic in debug
    /// builds (unless already panicking); in release builds, log and fail the request with
    /// BAD_STATE so the faulting thread is not blocked forever.
    fn drop(&mut self) {
        if let Some(inner) = &self.inner {
            let request_type = U::request_type_name();
            let range = self.range.clone();
            let key = inner.file.pager_packet_receiver_registration().key();
            if cfg!(debug_assertions) {
                if !std::thread::panicking() {
                    panic!(
                        "PagerRange was dropped without sending a response, \
                        request_type={request_type}, range={range:?}, key={key}",
                    );
                }
            } else {
                error!(
                    "PagerRange was dropped without sending a response, \
                    request_type={request_type}, range={range:?}, key={key}",
                );
                inner.file.pager().report_failure(inner.file.vmo(), range, zx::Status::BAD_STATE);
            }
        }
    }
}
829
/// Iterator produced by `PagerRange::chunks`. Yields `chunk_size`-sized `PagerRange`s over
/// `start..end`; `inner` is handed to the final chunk (see the `Iterator` impl) and its own
/// `Drop` flags incomplete consumption.
pub struct PagerRangeChunksIter<T: PagerBacked, U: PagerRequestType> {
    start: u64,
    end: u64,
    chunk_size: u64,
    inner: Option<PagerRangeInner<Arc<T>>>,
    _request_type: PhantomData<U>,
}
840
841impl<T: PagerBacked, U: PagerRequestType> Iterator for PagerRangeChunksIter<T, U> {
842 type Item = PagerRange<T, U>;
843 fn next(&mut self) -> Option<Self::Item> {
844 if self.start == self.end {
845 None
846 } else if self.start + self.chunk_size >= self.end {
847 let next = Self::Item {
848 range: self.start..self.end,
849 inner: self.inner.take(),
850 _request_type: PhantomData,
851 };
852 self.start = self.end;
853 Some(next)
854 } else {
855 let next_end = self.start + self.chunk_size;
856 let next = Self::Item {
857 range: self.start..next_end,
858 inner: self.inner.clone(),
859 _request_type: PhantomData,
860 };
861 self.start = next_end;
862 Some(next)
863 }
864 }
865}
866
impl<T: PagerBacked, U: PagerRequestType> Drop for PagerRangeChunksIter<T, U> {
    /// An iterator dropped before yielding all chunks leaves part of the request unanswered:
    /// panic in debug builds (unless already panicking); in release builds, log and fail the
    /// remaining range with BAD_STATE so the faulting thread is not blocked forever.
    fn drop(&mut self) {
        if self.start != self.end {
            let request_type = U::request_type_name();
            let remaining = self.start..self.end;
            let inner = self.inner.take().unwrap();
            let key = inner.file.pager_packet_receiver_registration().key();
            if cfg!(debug_assertions) {
                if !std::thread::panicking() {
                    panic!(
                        "PagerRangeChunksIter was dropped without being fully consumed, \
                        request_type={request_type}, remaining={remaining:?}, key={key}",
                    );
                }
            } else {
                error!(
                    "PagerRangeChunksIter was dropped without being fully consumed, \
                    request_type={request_type}, remaining={remaining:?}, key={key}",
                );
                inner.file.pager().report_failure(
                    inner.file.vmo(),
                    remaining,
                    zx::Status::BAD_STATE,
                );
            }
        }
    }
}
898
899#[cfg(test)]
900mod tests {
901 use super::*;
902 use futures::StreamExt;
903 use futures::channel::mpsc;
904 use fxfs_macros::ToWeakNode;
905
    // A pager request as observed by MockFile, for asserting on request sequences.
    #[derive(Clone, Debug, PartialEq, Eq)]
    enum PagerRequest {
        PageIn(Range<u64>),
        Dirty(Range<u64>),
    }

    // Test file that records the pager requests it receives and answers page-ins with zeroes.
    #[derive(ToWeakNode)]
    struct MockFile {
        vmo: zx::Vmo,
        pager_packet_receiver_registration: PagerPacketReceiverRegistration<Self>,
        pager: Arc<Pager>,
        // Log of requests, in arrival order; see `pager_requests`.
        pager_requests: Mutex<Vec<PagerRequest>>,
    }

    impl MockFile {
        // One-page unbounded file.
        fn new(pager: Arc<Pager>) -> Arc<Self> {
            Self::new_with_size_and_type(pager, page_size(), zx::VmoOptions::UNBOUNDED)
        }

        // Creates a file of the given size; TRAP_DIRTY is always added so writes generate
        // mark-dirty requests.
        fn new_with_size_and_type(
            pager: Arc<Pager>,
            size: u64,
            vmo_type: zx::VmoOptions,
        ) -> Arc<Self> {
            Arc::new_cyclic(|weak| {
                let (vmo, pager_packet_receiver_registration) = pager
                    .create_vmo(weak.clone(), size, vmo_type | zx::VmoOptions::TRAP_DIRTY)
                    .unwrap();
                Self {
                    pager,
                    vmo,
                    pager_packet_receiver_registration,
                    pager_requests: Default::default(),
                }
            })
        }

        // Returns the recorded requests, optionally clearing the log.
        fn pager_requests(&self, reset: bool) -> Vec<PagerRequest> {
            if reset {
                std::mem::take(&mut *self.pager_requests.lock())
            } else {
                self.pager_requests.lock().clone()
            }
        }
    }

    // Only the pager-related entry points matter for these tests; the FxNode surface is stubbed.
    impl FxNode for MockFile {
        fn object_id(&self) -> u64 {
            unimplemented!();
        }

        fn parent(&self) -> Option<Arc<crate::directory::FxDirectory>> {
            unimplemented!();
        }

        fn set_parent(&self, _parent: Arc<crate::directory::FxDirectory>) {
            unimplemented!();
        }

        fn open_count_add_one(&self) {
            unimplemented!();
        }

        fn open_count_sub_one(self: Arc<Self>) {
            unimplemented!();
        }

        fn object_descriptor(&self) -> fxfs::object_store::ObjectDescriptor {
            unimplemented!();
        }
    }

    impl PagerBacked for MockFile {
        fn pager(&self) -> &Pager {
            &self.pager
        }

        fn pager_packet_receiver_registration(&self) -> &PagerPacketReceiverRegistration<Self> {
            &self.pager_packet_receiver_registration
        }

        fn vmo(&self) -> &zx::Vmo {
            &self.vmo
        }

        // Records the request and supplies freshly-created (zeroed) pages.
        fn page_in(self: Arc<Self>, range: PageInRange<Self>) {
            let aux_vmo = zx::Vmo::create(range.len()).unwrap();
            self.pager_requests.lock().push(PagerRequest::PageIn(range.range()));
            range.supply_pages(&aux_vmo, 0);
        }

        // Records the request and acknowledges the dirty transition.
        fn mark_dirty(self: Arc<Self>, range: MarkDirtyRange<Self>) {
            self.pager_requests.lock().push(PagerRequest::Dirty(range.range()));
            range.dirty_pages();
        }

        fn on_zero_children(self: Arc<Self>) {}

        fn byte_size(&self) -> u64 {
            unimplemented!();
        }
        async fn aligned_read(
            &self,
            _aligned_byte_range: std::ops::Range<u64>,
        ) -> Result<buffer::Buffer<'_>, Error> {
            unimplemented!();
        }
    }
1016
    // Test file that signals on a channel each time `on_zero_children` fires.
    #[derive(ToWeakNode)]
    struct OnZeroChildrenFile {
        pager: Arc<Pager>,
        vmo: zx::Vmo,
        pager_packet_receiver_registration: PagerPacketReceiverRegistration<Self>,
        sender: Mutex<mpsc::UnboundedSender<()>>,
    }

    impl OnZeroChildrenFile {
        fn new(pager: Arc<Pager>, sender: mpsc::UnboundedSender<()>) -> Arc<Self> {
            Arc::new_cyclic(|weak| {
                let (vmo, pager_packet_receiver_registration) =
                    pager.create_vmo(weak.clone(), page_size(), zx::VmoOptions::empty()).unwrap();
                Self { pager, vmo, pager_packet_receiver_registration, sender: Mutex::new(sender) }
            })
        }
    }

    // Only `on_zero_children` matters here; everything else is stubbed.
    impl FxNode for OnZeroChildrenFile {
        fn object_id(&self) -> u64 {
            unimplemented!();
        }

        fn parent(&self) -> Option<Arc<crate::directory::FxDirectory>> {
            unimplemented!();
        }

        fn set_parent(&self, _parent: Arc<crate::directory::FxDirectory>) {
            unimplemented!();
        }

        fn open_count_add_one(&self) {
            unimplemented!();
        }

        fn open_count_sub_one(self: Arc<Self>) {
            unimplemented!();
        }

        fn object_descriptor(&self) -> fxfs::object_store::ObjectDescriptor {
            unimplemented!();
        }
    }

    impl PagerBacked for OnZeroChildrenFile {
        fn pager(&self) -> &Pager {
            &self.pager
        }

        fn pager_packet_receiver_registration(&self) -> &PagerPacketReceiverRegistration<Self> {
            &self.pager_packet_receiver_registration
        }

        fn vmo(&self) -> &zx::Vmo {
            &self.vmo
        }

        fn page_in(self: Arc<Self>, _range: PageInRange<Self>) {
            unreachable!();
        }

        fn mark_dirty(self: Arc<Self>, _range: MarkDirtyRange<Self>) {
            unreachable!();
        }

        // Notify the test that the zero-children callback fired.
        fn on_zero_children(self: Arc<Self>) {
            self.sender.lock().unbounded_send(()).unwrap();
        }
        fn byte_size(&self) -> u64 {
            unreachable!();
        }
        async fn aligned_read(
            &self,
            _aligned_byte_range: std::ops::Range<u64>,
        ) -> Result<buffer::Buffer<'_>, Error> {
            unreachable!();
        }
    }
1095
    // Creating a child VMO, arming the watch, then dropping the child should deliver exactly
    // one on_zero_children notification.
    #[fuchsia::test(threads = 2)]
    async fn test_watch_for_zero_children() {
        let (sender, mut receiver) = mpsc::unbounded();
        let scope = ExecutionScope::new();
        let pager = Arc::new(Pager::new(scope.clone()).unwrap());
        let file = OnZeroChildrenFile::new(pager.clone(), sender);
        {
            let _child_vmo = file
                .vmo()
                .create_child(
                    zx::VmoChildOptions::SNAPSHOT_AT_LEAST_ON_WRITE,
                    0,
                    file.vmo().get_content_size().unwrap(),
                )
                .unwrap();
            // First call arms a new watch, so it returns true.
            assert!(pager.watch_for_zero_children(file.as_ref()).unwrap());
        }
        receiver.next().await.unwrap();

        scope.wait().await;
    }

    // A second watch request while one is active is a no-op (returns false); re-watching after
    // the notification arms a fresh watch, which must then be stopped explicitly.
    #[fuchsia::test(threads = 2)]
    async fn test_multiple_watch_for_zero_children_calls() {
        let (sender, mut receiver) = mpsc::unbounded();
        let scope = ExecutionScope::new();
        let pager = Arc::new(Pager::new(scope.clone()).unwrap());
        let file = OnZeroChildrenFile::new(pager.clone(), sender);
        {
            let _child_vmo = file
                .vmo()
                .create_child(
                    zx::VmoChildOptions::SNAPSHOT_AT_LEAST_ON_WRITE,
                    0,
                    file.vmo().get_content_size().unwrap(),
                )
                .unwrap();
            assert!(pager.watch_for_zero_children(file.as_ref()).unwrap());
            // Already watching, so this returns false.
            assert!(!pager.watch_for_zero_children(file.as_ref()).unwrap());
        }
        receiver.next().await.unwrap();

        // The previous watch completed, so a new one can be armed.
        assert!(pager.watch_for_zero_children(file.as_ref()).unwrap());

        file.pager_packet_receiver_registration.stop_watching_for_zero_children();

        scope.wait().await;
    }
1149
    // Verifies that `Pager::report_failure` maps arbitrary failure statuses onto the set of
    // statuses the kernel accepts for failed page requests, as observed via Vmo::read.
    #[fuchsia::test(threads = 2)]
    async fn test_status_code_mapping() {
        // File that fails every page-in with a configurable status.
        #[derive(ToWeakNode)]
        struct StatusCodeFile {
            vmo: zx::Vmo,
            pager: Arc<Pager>,
            status_code: Mutex<zx::Status>,
            pager_packet_receiver_registration: PagerPacketReceiverRegistration<Self>,
        }

        impl FxNode for StatusCodeFile {
            fn object_id(&self) -> u64 {
                unimplemented!();
            }

            fn parent(&self) -> Option<Arc<crate::directory::FxDirectory>> {
                unimplemented!();
            }

            fn set_parent(&self, _parent: Arc<crate::directory::FxDirectory>) {
                unimplemented!();
            }

            fn open_count_add_one(&self) {
                unimplemented!();
            }

            fn open_count_sub_one(self: Arc<Self>) {
                unimplemented!();
            }

            fn object_descriptor(&self) -> fxfs::object_store::ObjectDescriptor {
                unimplemented!();
            }
        }

        impl PagerBacked for StatusCodeFile {
            fn pager(&self) -> &Pager {
                &self.pager
            }

            fn pager_packet_receiver_registration(&self) -> &PagerPacketReceiverRegistration<Self> {
                &self.pager_packet_receiver_registration
            }

            fn vmo(&self) -> &zx::Vmo {
                &self.vmo
            }

            // Fail every page-in with the currently configured status.
            fn page_in(self: Arc<Self>, range: PageInRange<Self>) {
                range.report_failure(*self.status_code.lock())
            }

            fn mark_dirty(self: Arc<Self>, _range: MarkDirtyRange<Self>) {
                unreachable!();
            }

            fn on_zero_children(self: Arc<Self>) {
                unreachable!();
            }

            fn byte_size(&self) -> u64 {
                unreachable!();
            }
            async fn aligned_read(
                &self,
                _aligned_byte_range: std::ops::Range<u64>,
            ) -> Result<buffer::Buffer<'_>, Error> {
                unreachable!();
            }
        }

        let scope = ExecutionScope::new();
        let pager = Arc::new(Pager::new(scope.clone()).unwrap());
        let file = Arc::new_cyclic(|weak| {
            let (vmo, pager_packet_receiver_registration) =
                pager.create_vmo(weak.clone(), page_size(), zx::VmoOptions::empty()).unwrap();
            StatusCodeFile {
                vmo,
                pager: pager.clone(),
                status_code: Mutex::new(zx::Status::INTERNAL),
                pager_packet_receiver_registration,
            }
        });

        // Sets the failure status, triggers a page-in via Vmo::read, and checks the status
        // the reader observes.
        fn check_mapping(
            file: &StatusCodeFile,
            failure_code: zx::Status,
            expected_code: zx::Status,
        ) {
            {
                *file.status_code.lock() = failure_code;
            }
            let mut buf = [0u8; 8];
            assert_eq!(file.vmo().read(&mut buf, 0).unwrap_err(), expected_code);
        }
        check_mapping(&file, zx::Status::IO_DATA_INTEGRITY, zx::Status::IO_DATA_INTEGRITY);
        check_mapping(&file, zx::Status::NO_SPACE, zx::Status::NO_SPACE);
        check_mapping(&file, zx::Status::FILE_BIG, zx::Status::BUFFER_TOO_SMALL);
        check_mapping(&file, zx::Status::IO, zx::Status::IO);
        check_mapping(&file, zx::Status::IO_DATA_LOSS, zx::Status::IO);
        check_mapping(&file, zx::Status::NOT_EMPTY, zx::Status::BAD_STATE);
        check_mapping(&file, zx::Status::BAD_STATE, zx::Status::BAD_STATE);

        scope.wait().await;
    }
1256
    // Exercises query_vmo_stats: modified flag is set by writes and cleared by
    // RESET_VMO_STATS.
    #[fuchsia::test(threads = 2)]
    async fn test_query_vmo_stats() {
        let scope = ExecutionScope::new();
        let pager = Arc::new(Pager::new(scope.clone()).unwrap());
        let file = MockFile::new(pager.clone());

        let stats = pager.query_vmo_stats(file.vmo(), PagerVmoStatsOptions::empty()).unwrap();
        assert!(!stats.was_vmo_modified());

        file.vmo().write(&[0, 1, 2, 3, 4], 0).unwrap();
        let stats = pager.query_vmo_stats(file.vmo(), PagerVmoStatsOptions::empty()).unwrap();
        assert!(stats.was_vmo_modified());

        // The reset query still reports modified, but clears the flag for later queries.
        let stats =
            pager.query_vmo_stats(file.vmo(), PagerVmoStatsOptions::RESET_VMO_STATS).unwrap();
        assert!(stats.was_vmo_modified());

        let stats = pager.query_vmo_stats(file.vmo(), PagerVmoStatsOptions::empty()).unwrap();
        assert!(!stats.was_vmo_modified());

        scope.wait().await;
    }

    // Exercises query_dirty_ranges: dirty/zero range reporting, the (actual, remaining)
    // split when the buffer is too small, and the pager requests generated along the way.
    #[fuchsia::test(threads = 2)]
    async fn test_query_dirty_ranges() {
        let scope = ExecutionScope::new();
        let pager = Arc::new(Pager::new(scope.clone()).unwrap());
        let file = MockFile::new_with_size_and_type(
            pager.clone(),
            page_size() + page_size() / 2,
            zx::VmoOptions::UNBOUNDED,
        );
        let mut buffer = vec![VmoDirtyRange::default(); 2];

        let page_size = page_size();
        assert_eq!(file.vmo().get_content_size().unwrap(), page_size + page_size / 2);

        // Nothing dirty yet.
        let (actual, remaining) =
            pager.query_dirty_ranges(file.vmo(), 0..page_size * 100, &mut buffer).unwrap();
        assert_eq!(actual, 0);
        assert_eq!(remaining, 0);

        // Growing the stream dirties the tail of the old last page and adds a zero range.
        file.vmo().set_stream_size(page_size * 7 + page_size / 2).unwrap();

        let (actual, remaining) =
            pager.query_dirty_ranges(file.vmo(), 0..page_size * 100, &mut buffer).unwrap();
        assert_eq!(actual, 2);
        assert_eq!(remaining, 0);
        assert_eq!(buffer[0].range(), page_size..page_size * 2);
        assert!(!buffer[0].is_zero_range());
        assert_eq!(buffer[1].range(), page_size * 2..page_size * 8);
        assert!(buffer[1].is_zero_range());

        assert_eq!(
            file.pager_requests(true),
            vec![
                PagerRequest::PageIn(page_size * 1..page_size * 2),
                PagerRequest::Dirty(page_size * 1..page_size * 2),
            ]
        );

        file.vmo().write(&[1, 2, 3, 4], page_size).unwrap();
        file.vmo().write(&[1, 2, 3, 4], page_size * 2).unwrap();
        file.vmo().write(&[1, 2, 3, 4], page_size * 4).unwrap();

        // Page 1 was already dirty, so only pages 2 and 4 trap.
        assert_eq!(
            file.pager_requests(true),
            vec![
                PagerRequest::Dirty(page_size * 2..page_size * 3),
                PagerRequest::Dirty(page_size * 4..page_size * 5)
            ]
        );

        // Buffer holds 2 entries; 2 more dirty ranges remain past these.
        let (actual, remaining) =
            pager.query_dirty_ranges(file.vmo(), 0..page_size * 7, &mut buffer).unwrap();
        assert_eq!(actual, 2);
        assert_eq!(remaining, 2);
        assert_eq!(buffer[0].range(), page_size..(page_size * 3));
        assert!(!buffer[0].is_zero_range());
        assert_eq!(buffer[1].range(), (page_size * 3)..(page_size * 4));
        assert!(buffer[1].is_zero_range());

        let (actual, remaining) = pager
            .query_dirty_ranges(file.vmo(), page_size * 4..page_size * 7, &mut buffer)
            .unwrap();
        assert_eq!(actual, 2);
        assert_eq!(remaining, 0);
        assert_eq!(buffer[0].range(), (page_size * 4)..(page_size * 5));
        assert!(!buffer[0].is_zero_range());
        assert_eq!(buffer[1].range(), (page_size * 5)..(page_size * 7));
        assert!(buffer[1].is_zero_range());

        // Reading a zero range yields zeroes without generating pager requests.
        let mut read_buf = vec![0u8; page_size as usize];
        file.vmo().read(&mut read_buf, page_size * 3).expect("read");
        let expected = vec![0u8; page_size as usize];
        assert_eq!(read_buf, expected);
        assert_eq!(file.pager_requests(true), vec![]);

        scope.wait().await;
    }
1381
    /// Bytes past the stream size must read back as zero after the stream is
    /// grown, even if they were written while outside the valid range.
    #[fuchsia::test(threads = 2)]
    async fn test_zero_grown_vmo() {
        let scope = ExecutionScope::new();
        let pager = Arc::new(Pager::new(scope.clone()).unwrap());
        let file = MockFile::new(pager.clone());

        // Sanity check: a normal grow + write + read round-trips the data.
        let write_buf = vec![0xff; page_size() as usize * 2];
        file.vmo().set_stream_size(page_size() * 2).expect("grow");
        file.vmo().write(&write_buf, 0).expect("write");
        let mut read_buf = vec![0u8; page_size() as usize * 2];
        file.vmo().read(&mut read_buf, 0).expect("read");
        assert_eq!(read_buf, write_buf);

        // Shrink so only 1 byte of the second page is valid, then write past
        // the new end of the stream (still within the committed page).
        file.vmo().set_stream_size(page_size() + 1).expect("shrink");
        file.vmo().write(&[0xff; 3], page_size() + 2).expect("write after shrink");
        file.vmo().set_stream_size(page_size() + 4).expect("grow again");
        // Only the byte that stayed inside the stream keeps its value; the
        // bytes written beyond the shrunken end must come back as zero.
        let mut read_buf = vec![0u8; page_size() as usize];
        file.vmo().read(&mut read_buf, page_size()).expect("read");
        let mut expected = vec![0u8; page_size() as usize];
        expected[0] = 0xff;
        assert_eq!(read_buf, expected);

        scope.wait().await;
    }
1410
1411 #[fuchsia::test]
1412 async fn test_pager_range_chunks_iter_chunks() {
1413 let scope = ExecutionScope::new();
1414 let pager = Arc::new(Pager::new(scope).unwrap());
1415 let file = MockFile::new(pager.clone());
1416
1417 let pager_range = PageInRange::new(0..page_size() * 5, file, Epoch::global().guard());
1418 let ranges: Vec<Range<u64>> = pager_range
1419 .chunks(page_size() * 2)
1420 .map(|pager_range| {
1421 let range = pager_range.range();
1422 pager_range.consume();
1423 range
1424 })
1425 .collect();
1426 assert_eq!(
1427 ranges,
1428 [
1429 0..page_size() * 2,
1430 page_size() * 2..page_size() * 4,
1431 page_size() * 4..page_size() * 5
1432 ]
1433 );
1434 }
1435
1436 #[fuchsia::test]
1437 async fn test_pager_range_split() {
1438 let scope = ExecutionScope::new();
1439 let pager = Arc::new(Pager::new(scope).unwrap());
1440 let file = MockFile::new(pager.clone());
1441
1442 let pager_range = PageInRange::new(0..page_size() * 10, file, Epoch::global().guard());
1443 let (left, right) = pager_range.split(page_size() * 5);
1444 let (left, right) = (left.unwrap(), right.unwrap());
1445 assert_eq!(left.range(), 0..page_size() * 5);
1446 assert_eq!(right.range(), page_size() * 5..page_size() * 10);
1447
1448 left.consume();
1449 right.consume();
1450 }
1451
1452 #[fuchsia::test]
1453 #[should_panic(expected = "0..8192 is not a subset of 0..4096")]
1454 async fn test_pager_range_bad_expand_panics() {
1455 let scope = ExecutionScope::new();
1456 let pager = Arc::new(Pager::new(scope).unwrap());
1457 let file = MockFile::new(pager.clone());
1458
1459 let pager_range = PageInRange::new(0..page_size() * 2, file, Epoch::global().guard());
1460 pager_range.expand(0..page_size()).consume();
1461 }
1462
    /// A `PagerBacked` test double whose page-in and mark-dirty handling is
    /// delegated to caller-supplied callbacks.
    #[derive(ToWeakNode)]
    struct PagerRangeTestFile {
        vmo: zx::Vmo,
        pager_packet_receiver_registration: PagerPacketReceiverRegistration<Self>,
        pager: Pager,
        // Called to service each page-in request (see `PagerBacked::page_in`).
        page_in_fn: Box<dyn Fn(PageInRange<Self>) + Send + Sync + 'static>,
        // Called to service each mark-dirty request (see
        // `PagerBacked::mark_dirty`).
        mark_dirty_fn: Box<dyn Fn(MarkDirtyRange<Self>) + Send + Sync + 'static>,
    }
1471
1472 impl PagerRangeTestFile {
1473 fn new<
1474 F1: Fn(PageInRange<Self>) + Send + Sync + 'static,
1475 F2: Fn(MarkDirtyRange<Self>) + Send + Sync + 'static,
1476 >(
1477 page_in_fn: F1,
1478 mark_dirty_fn: F2,
1479 ) -> Arc<Self> {
1480 Arc::new_cyclic(|weak| {
1481 let pager = Pager::new(ExecutionScope::new()).unwrap();
1482 let (vmo, pager_packet_receiver_registration) = pager
1483 .create_vmo(weak.clone(), page_size() * 2, zx::VmoOptions::TRAP_DIRTY)
1484 .unwrap();
1485 Self {
1486 vmo,
1487 pager_packet_receiver_registration,
1488 pager,
1489 page_in_fn: Box::new(page_in_fn),
1490 mark_dirty_fn: Box::new(mark_dirty_fn),
1491 }
1492 })
1493 }
1494 }
1495
    // Minimal `FxNode` implementation: only `object_id` returns a value; the
    // remaining methods are not exercised by these tests.
    impl FxNode for PagerRangeTestFile {
        // Arbitrary fixed id for the test node.
        fn object_id(&self) -> u64 {
            1
        }

        fn parent(&self) -> Option<Arc<crate::directory::FxDirectory>> {
            unimplemented!()
        }

        fn set_parent(&self, _parent: Arc<crate::directory::FxDirectory>) {
            unimplemented!()
        }

        fn open_count_add_one(&self) {
            unimplemented!()
        }

        fn open_count_sub_one(self: Arc<Self>) {
            unimplemented!()
        }

        fn object_descriptor(&self) -> fxfs::object_store::ObjectDescriptor {
            unimplemented!()
        }
    }
1521
    // `PagerBacked` implementation that simply forwards pager requests to the
    // callbacks injected via `PagerRangeTestFile::new`.
    impl PagerBacked for PagerRangeTestFile {
        fn pager(&self) -> &Pager {
            &self.pager
        }

        fn pager_packet_receiver_registration(&self) -> &PagerPacketReceiverRegistration<Self> {
            &self.pager_packet_receiver_registration
        }

        fn vmo(&self) -> &zx::Vmo {
            &self.vmo
        }

        // Delegate page-in requests to the injected callback.
        fn page_in(self: Arc<Self>, range: PageInRange<Self>) {
            (self.page_in_fn)(range)
        }

        // Delegate mark-dirty requests to the injected callback.
        fn mark_dirty(self: Arc<Self>, range: MarkDirtyRange<Self>) {
            (self.mark_dirty_fn)(range)
        }

        // No-op: these tests do not observe zero-children notifications.
        fn on_zero_children(self: Arc<Self>) {}

        // Not needed by the pager-range tests.
        fn byte_size(&self) -> u64 {
            unimplemented!();
        }

        // Not needed by the pager-range tests.
        async fn aligned_read(
            &self,
            _range: std::ops::Range<u64>,
        ) -> Result<buffer::Buffer<'_>, Error> {
            unimplemented!();
        }
    }
1556
1557 fn real_supply_pages(range: PageInRange<PagerRangeTestFile>) {
1558 let aux_vmo = zx::Vmo::create(range.len()).unwrap();
1559 range.supply_pages(&aux_vmo, 0);
1560 }
1561
1562 fn real_mark_dirty(range: MarkDirtyRange<PagerRangeTestFile>) {
1563 range.dirty_pages();
1564 }
1565
1566 #[fuchsia::test(threads = 2)]
1567 async fn test_page_in_range_supply_pages() {
1568 let file = PagerRangeTestFile::new(real_supply_pages, real_mark_dirty);
1569
1570 let mut data = vec![0; 20];
1571 file.vmo.read(&mut data, 0).unwrap();
1572 }
1573
1574 #[fuchsia::test(threads = 2)]
1575 async fn test_page_in_range_report_failure() {
1576 let file = PagerRangeTestFile::new(
1577 |range| {
1578 range.report_failure(zx::Status::IO_DATA_INTEGRITY);
1579 },
1580 real_mark_dirty,
1581 );
1582
1583 let mut data = vec![0; 20];
1584 let err = file.vmo.read(&mut data, 0).unwrap_err();
1585 assert_eq!(err, zx::Status::IO_DATA_INTEGRITY);
1586 }
1587
1588 #[cfg(debug_assertions)]
1589 #[fuchsia::test(threads = 2)]
1590 #[should_panic(expected = "PagerRange was dropped without sending a response")]
1591 async fn test_page_in_range_dropped() {
1592 let file = PagerRangeTestFile::new(|_| {}, real_mark_dirty);
1593
1594 let mut data = vec![0; 20];
1595 file.vmo.read(&mut data, 0).unwrap_err();
1596 }
1597
1598 #[cfg(not(debug_assertions))]
1599 #[fuchsia::test(threads = 2)]
1600 async fn test_page_in_range_dropped() {
1601 let file = PagerRangeTestFile::new(|_| {}, real_mark_dirty);
1602
1603 let mut data = vec![0; 20];
1604 let err = file.vmo.read(&mut data, 0).unwrap_err();
1605 assert_eq!(err, zx::Status::BAD_STATE);
1606 }
1607
1608 #[fuchsia::test(threads = 2)]
1609 async fn test_mark_dirty_range_dirty_pages() {
1610 let file = PagerRangeTestFile::new(real_supply_pages, real_mark_dirty);
1611
1612 let data = vec![5; 20];
1613 file.vmo.write(&data, 0).unwrap();
1614 }
1615
1616 #[fuchsia::test(threads = 2)]
1617 async fn test_mark_dirty_range_report_failure() {
1618 let file = PagerRangeTestFile::new(real_supply_pages, |range| {
1619 range.report_failure(zx::Status::IO_DATA_INTEGRITY);
1620 });
1621
1622 let data = vec![5; 20];
1623 let err = file.vmo.write(&data, 0).unwrap_err();
1624 assert_eq!(err, zx::Status::IO_DATA_INTEGRITY);
1625 }
1626
1627 #[cfg(debug_assertions)]
1628 #[fuchsia::test(threads = 2)]
1629 #[should_panic(expected = "PagerRange was dropped without sending a response")]
1630 async fn test_mark_dirty_range_dropped() {
1631 let file = PagerRangeTestFile::new(real_supply_pages, |_| {});
1632
1633 let data = vec![5; 20];
1634 file.vmo.write(&data, 0).unwrap_err();
1635 }
1636
1637 #[cfg(not(debug_assertions))]
1638 #[fuchsia::test(threads = 2)]
1639 async fn test_mark_dirty_range_dropped() {
1640 let file = PagerRangeTestFile::new(real_supply_pages, |_| {});
1641
1642 let data = vec![5; 20];
1643 let err = file.vmo.write(&data, 0).unwrap_err();
1644 assert_eq!(err, zx::Status::BAD_STATE);
1645 }
1646
1647 #[fuchsia::test(threads = 2)]
1648 async fn test_pager_range_chunks_iter_consumed() {
1649 let file = PagerRangeTestFile::new(
1650 |range| {
1651 let aux_vmo = zx::Vmo::create(page_size()).unwrap();
1652 range.expand(0..page_size() * 2).chunks(page_size()).for_each(|range| {
1653 range.supply_pages(&aux_vmo, 0);
1654 });
1655 },
1656 real_mark_dirty,
1657 );
1658
1659 let mut data = vec![0; 20];
1660 file.vmo.read(&mut data, 0).unwrap();
1661 }
1662
1663 fn partial_supply_pages(range: PageInRange<PagerRangeTestFile>) {
1664 let aux_vmo = zx::Vmo::create(page_size()).unwrap();
1665 range.expand(0..page_size() * 2).chunks(page_size()).take(1).for_each(|range| {
1668 range.supply_pages(&aux_vmo, 0);
1669 });
1670 }
1671
1672 #[cfg(debug_assertions)]
1673 #[fuchsia::test(threads = 2)]
1674 #[should_panic(expected = "PagerRangeChunksIter was dropped without being fully consumed")]
1675 async fn test_pager_range_chunks_iter_dropped() {
1676 let file = PagerRangeTestFile::new(partial_supply_pages, real_mark_dirty);
1677
1678 let mut data = vec![0; 20];
1679 file.vmo.read(&mut data, page_size()).unwrap_err();
1682 }
1683
1684 #[cfg(not(debug_assertions))]
1685 #[fuchsia::test(threads = 2)]
1686 async fn test_pager_range_chunks_iter_dropped() {
1687 let file = PagerRangeTestFile::new(partial_supply_pages, real_mark_dirty);
1688
1689 let mut data = vec![0; 20];
1690 let err = file.vmo.read(&mut data, page_size()).unwrap_err();
1693 assert_eq!(err, zx::Status::BAD_STATE);
1694 }
1695
    /// Growing a file — via `set_size` on a RESIZABLE VMO or `set_stream_size`
    /// on an UNBOUNDED one — must expose the new bytes as zero ranges and must
    /// not generate redundant pager requests.
    #[fuchsia::test(threads = 2)]
    async fn test_grow_zeroes_new_bytes() {
        let scope = ExecutionScope::new();
        let pager = Arc::new(Pager::new(scope.clone()).unwrap());
        let page_size = page_size();
        let vmo_size: u64 = page_size * 2;
        // Exercise both VMO flavours side by side to check they behave alike.
        let file_a =
            MockFile::new_with_size_and_type(pager.clone(), vmo_size, zx::VmoOptions::RESIZABLE);
        let file_b =
            MockFile::new_with_size_and_type(pager.clone(), vmo_size, zx::VmoOptions::UNBOUNDED);
        let mut buffer = vec![VmoDirtyRange::default(); 3];

        assert_eq!(file_a.vmo().get_stream_size().unwrap(), page_size * 2);
        assert_eq!(file_b.vmo().get_stream_size().unwrap(), page_size * 2);

        // Reading the second page faults it in on both files.
        let mut read_buf = vec![0u8; page_size as usize];
        file_a.vmo().read(&mut read_buf, page_size).expect("read a");
        assert_eq!(
            file_a.pager_requests(true),
            vec![PagerRequest::PageIn(page_size..page_size * 2)]
        );
        file_b.vmo().read(&mut read_buf, page_size).expect("read b");
        assert_eq!(
            file_b.pager_requests(true),
            vec![PagerRequest::PageIn(page_size..page_size * 2)]
        );

        // Grow both files from 2 to 8 pages.
        let vmo_size = page_size * 8;
        file_a.vmo().set_size(vmo_size).unwrap();
        file_b.vmo().set_stream_size(vmo_size).unwrap();

        // The 6 new pages appear as a single zero range (options == 1) on
        // both files.
        assert_eq!(
            pager.query_dirty_ranges(file_a.vmo(), 0..vmo_size, &mut buffer).unwrap(),
            (1, 0)
        );
        assert_eq!(
            buffer[0],
            VmoDirtyRange { offset: page_size * 2, length: page_size * 6, options: 1 },
        );
        assert_eq!(
            pager.query_dirty_ranges(file_b.vmo(), 0..vmo_size, &mut buffer).unwrap(),
            (1, 0)
        );
        assert_eq!(
            buffer[0],
            VmoDirtyRange { offset: page_size * 2, length: page_size * 6, options: 1 },
        );

        // Reading the zero range must not trigger any pager requests.
        let mut read_buf = vec![0u8; page_size as usize * 6];
        file_a.vmo().read(&mut read_buf, page_size * 2).expect("read a");
        assert_eq!(file_a.pager_requests(true), vec![]);
        file_b.vmo().read(&mut read_buf, page_size * 2).expect("read b");
        assert_eq!(file_b.pager_requests(true), vec![]);

        // Resizing to the same size is a no-op: the zero range is unchanged
        // and no pager requests are generated.
        let vmo_size = page_size * 8;
        file_a.vmo().set_size(vmo_size).unwrap();
        file_b.vmo().set_stream_size(vmo_size).unwrap();
        assert_eq!(
            pager.query_dirty_ranges(file_a.vmo(), 0..vmo_size, &mut buffer).unwrap(),
            (1, 0)
        );
        assert_eq!(
            buffer[0],
            VmoDirtyRange { offset: page_size * 2, length: page_size * 6, options: 1 },
        );
        assert_eq!(
            pager.query_dirty_ranges(file_b.vmo(), 0..vmo_size, &mut buffer).unwrap(),
            (1, 0)
        );
        assert_eq!(
            buffer[0],
            VmoDirtyRange { offset: page_size * 2, length: page_size * 6, options: 1 },
        );
        assert_eq!(file_a.pager_requests(true), vec![],);
        assert_eq!(file_b.pager_requests(true), vec![],);

        // Writing into the zero range traps as a dirty request.
        file_b.vmo().write(&[1; 10], page_size * 2).unwrap();
        assert_eq!(
            file_b.pager_requests(true),
            vec![PagerRequest::Dirty(page_size * 2..page_size * 3)],
        );

        // Shrink to 4 pages, then write through a stream positioned at the
        // new end: the written page generates a dirty request.
        let vmo_size = page_size * 4;
        file_b.vmo().set_stream_size(vmo_size).unwrap();
        let stream =
            zx::Stream::create(zx::StreamOptions::MODE_WRITE, file_b.vmo(), page_size * 4).unwrap();
        stream.write(zx::StreamWriteOptions::empty(), &vec![10; page_size as usize]).unwrap();
        assert_eq!(
            file_b.pager_requests(true),
            vec![PagerRequest::Dirty(page_size * 4..page_size * 5)],
        );

        // An append-mode stream write past the end dirties the next page.
        let stream = zx::Stream::create(
            zx::StreamOptions::MODE_WRITE | zx::StreamOptions::MODE_APPEND,
            file_b.vmo(),
            page_size * 5,
        )
        .unwrap();
        stream.write(zx::StreamWriteOptions::empty(), &[10; 1024]).unwrap();
        assert_eq!(
            file_b.pager_requests(true),
            vec![PagerRequest::Dirty(page_size * 5..page_size * 6)],
        );

        scope.wait().await;
    }
1813
    /// Pathological case: dirty the entire (large) VMO, then repeatedly shrink
    /// it; at the end only the first, still-valid (partial) page remains
    /// dirty.
    #[fuchsia::test(threads = 2)]
    async fn test_pathological_shrink_unbounded_vmo() {
        let scope = ExecutionScope::new();
        let pager = Arc::new(Pager::new(scope.clone()).unwrap());
        let page_size = page_size();
        // 25600 pages so the shrink loop below has a lot of dirty state to
        // walk through.
        let vmo_size: u64 = page_size * 25600;
        let file =
            MockFile::new_with_size_and_type(pager.clone(), vmo_size, zx::VmoOptions::UNBOUNDED);
        let mut buffer = vec![VmoDirtyRange::default(); 10];

        assert_eq!(file.vmo().get_stream_size().unwrap(), vmo_size);

        // Dirty 20 bytes every 256 bytes across the whole VMO, touching every
        // page.
        for i in 0..vmo_size / 256 {
            let data = vec![5; 20];
            file.vmo.write(&data, i * 256).expect("write failed");
        }

        // Shrink in steps, each new size landing mid-page (offset
        // page_size / 2), down to half a page.
        for i in (0..25600u64 / 1024).rev() {
            file.vmo().set_stream_size(i * 1024 + page_size / 2).unwrap();
        }

        // Only the first page (containing the remaining valid bytes) is still
        // dirty.
        assert_eq!(pager.query_dirty_ranges(file.vmo(), 0..vmo_size, &mut buffer).unwrap(), (1, 0));
        assert_eq!(buffer[0..1], [VmoDirtyRange { offset: 0, length: page_size, options: 0 },]);

        scope.wait().await;
    }
1841
    /// Like `test_pathological_shrink_unbounded_vmo`, but only every other
    /// page is dirtied, and the dirty state is verified after every single
    /// shrink step.
    #[fuchsia::test(threads = 2)]
    async fn test_pathological_shrink_unbounded_vmo_with_gaps() {
        let scope = ExecutionScope::new();
        let pager = Arc::new(Pager::new(scope.clone()).unwrap());
        let page_size = page_size();
        let vmo_size: u64 = page_size * 25600;
        let file =
            MockFile::new_with_size_and_type(pager.clone(), vmo_size, zx::VmoOptions::UNBOUNDED);
        let mut buffer = vec![VmoDirtyRange::default(); 10];

        assert_eq!(file.vmo().get_stream_size().unwrap(), vmo_size);

        // Dirty every second page, leaving clean gaps in between.
        for offset in (0u64..vmo_size).step_by((page_size * 2) as usize) {
            let data = vec![5; 20];
            file.vmo.write(&data, offset).expect("write failed");
        }
        // Half the pages should now be dirty.
        let (actual, remaining) =
            pager.query_dirty_ranges(file.vmo(), 0..vmo_size, &mut buffer).unwrap();
        assert_eq!(actual + remaining, 25600 / 2);

        // Shrink towards zero with two alternating step sizes (5 and 4 pages,
        // offset by 2 bytes) so the new end lands in both dirty and clean
        // pages. After every shrink, the only dirty range from the new end
        // onwards must be the single page containing it.
        let mut offset = vmo_size.saturating_sub(5 * page_size - 2);
        'outer: loop {
            for delta in [5 * page_size, 4 * page_size] {
                file.vmo().set_stream_size(offset).unwrap();
                assert_eq!(
                    pager.query_dirty_ranges(file.vmo(), offset..vmo_size, &mut buffer).unwrap(),
                    (1, 0)
                );
                assert_eq!(
                    buffer[0..1],
                    [VmoDirtyRange {
                        offset: round_down(offset, page_size),
                        length: page_size,
                        options: 0
                    },]
                );
                offset = offset.saturating_sub(delta);
                if offset == 0 {
                    break 'outer;
                }
            }
        }

        scope.wait().await;
    }
1893
    /// Growing an UNBOUNDED VMO's stream must zero bytes that were written
    /// beyond the old stream size and report the expected dirty/zero ranges.
    #[fuchsia::test(threads = 2)]
    async fn test_grow_unbounded_vmo() {
        let scope = ExecutionScope::new();
        let pager = Arc::new(Pager::new(scope.clone()).unwrap());
        // The stream starts at 128 bytes — a fraction of the first page.
        let file = MockFile::new_with_size_and_type(pager.clone(), 128, zx::VmoOptions::UNBOUNDED);

        // Write past the current end of the stream (still within page 0),
        // then grow over the written region.
        let data = vec![1; 128];
        file.vmo().write(&data, 128).expect("write failed");
        file.vmo().set_stream_size(256).unwrap();
        assert_eq!(file.vmo().get_stream_size().expect("get_stream_size"), 256);

        // The grow must have zeroed the bytes written beyond the old end.
        let mut data = vec![0xff; 256];
        file.vmo().read(&mut data, 0).expect("read");
        let expected = vec![0; 256];
        assert_eq!(data, expected);

        // Grow to 3 pages: page 0 (which held the valid bytes) is dirty
        // (options == 0) and the two wholly-new pages are a zero range
        // (options == 1).
        file.vmo().set_stream_size(page_size() * 3).unwrap();
        let mut buffer = vec![VmoDirtyRange::default(); 10];
        assert_eq!(
            pager.query_dirty_ranges(file.vmo(), 0..page_size() * 3, &mut buffer).unwrap(),
            (2, 0)
        );
        assert_eq!(
            buffer[0..2],
            [
                VmoDirtyRange { offset: 0, length: page_size(), options: 0 },
                VmoDirtyRange { offset: page_size(), length: page_size() * 2, options: 1 },
            ]
        );

        scope.wait().await;
    }
1929}