// Copyright 2021 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

//! Fake queue for simulating driver interactions in unittests
//!
//! To facilitate writing unittests it is useful to manipulate the queues from the perspective of
//! a driver. This module contains helpers that allow for:
//!  * Building simple descriptor chains
//!  * Publishing the built descriptors in the available ring
//!  * Receiving written descriptors from the used ring
//!
//! This functionality centers around the [`FakeQueue`] implementation.
//!
//! For simplicity of writing tests the [`TestQueue`] struct packages together all the pieces
//! commonly needed to write a test.
//!
//! This module is exposed publicly since, in addition to being used for the unittests of this
//! library, it can also be used to write unittests for actual virtio device drivers without
//! needing a guest environment.
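//!
//! As a rough sketch of the intended flow (illustrative only, not compiled as a doctest; variable
//! names are placeholders):
//!
//! ```ignore
//! // Identity-mapped "guest" memory plus a paired Queue/FakeQueue over the same rings.
//! let mem = IdentityDriverMem::new();
//! let mut state = TestQueue::new(32, &mem);
//!
//! // Build a chain with one 16 byte readable and one 32 byte writable descriptor and publish it
//! // on the available ring, as a driver would.
//! assert!(state.fake_queue.publish(Chain::with_lengths(&[16], &[32], &mem)).is_some());
//!
//! // ... run the device under test against state.queue ...
//!
//! // Drain anything the device returned on the used ring.
//! while let Some(used) = state.fake_queue.next_used() {
//!     for (addr, len) in used.data_iter() {
//!         // Inspect the data the device claims to have written.
//!     }
//! }
//! ```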

use crate::mem::{DeviceRange, DriverRange};
use crate::queue::{Queue, QueueMemory};
use crate::ring::{self, DescAccess, VRING_DESC_F_INDIRECT};
use crate::util::NotificationCounter;
use fuchsia_sync::Mutex;
use std::alloc::{self, GlobalAlloc};
use std::collections::HashMap;

// Helper struct that just holds an allocation and returns it to the system allocator on drop.
struct IdentityAlloc {
    // alloc must not be null and must be the result of having passed layout to System.alloc_zeroed,
    // such that it is safe to pass alloc and layout to System.dealloc.
    alloc: *mut u8,
    layout: alloc::Layout,
}

impl IdentityAlloc {
    fn new(layout: alloc::Layout) -> Option<Self> {
        if layout.size() == 0 {
            return None;
        }
        // The safety requirement on alloc_zeroed is that layout not be for a zero size, which we
        // validated just above.
        let alloc = unsafe { alloc::System.alloc_zeroed(layout) };
        if alloc.is_null() {
            return None;
        }
        Some(IdentityAlloc { alloc, layout })
    }
    fn base(&self) -> usize {
        self.alloc as usize
    }
}

impl Drop for IdentityAlloc {
    fn drop(&mut self) {
        // It is an invariant on alloc and layout that alloc is not null and that these are valid to
        // pass to System.dealloc.
        unsafe { alloc::System.dealloc(self.alloc, self.layout) };
    }
}

/// Implementation of [`crate::mem::DriverMem`] assuming the identity translation.
///
/// Can be used to allocate valid [`DeviceRange`]s using the [`range_with_layout`] or [`new_range`]
/// methods. This then implements the identity translation in [`translate`], meaning that:
///
/// ```
/// let range = identity_driver_mem.new_range(64)?;
/// assert_eq!(identity_driver_mem.translate(range.get().into()), Some(range));
/// ```
///
/// There is no mechanism to free or deallocate any constructed ranges; this is necessary to ensure
/// they remain valid for their provided lifetimes. Allocations will be freed once the
/// [`IdentityDriverMem`] is dropped.
pub struct IdentityDriverMem {
    // Allocations are stored using a DriverRange as the key, instead of a DeviceRange, to avoid
    // needing to invent a fake lifetime annotation. It is a requirement that these allocations only
    // be added to over the lifetime of the IdentityDriverMem.
    allocations: Mutex<Vec<(DriverRange, IdentityAlloc)>>,
}

impl crate::mem::DriverMem for IdentityDriverMem {
    fn translate<'a>(&'a self, driver: DriverRange) -> Option<DeviceRange<'a>> {
        if driver.len() == 0 {
            return None;
        }
        // See if `driver` is contained in any of the ranges we have.
        let range = self
            .allocations
            .lock()
            .iter()
            .map(|x| x.0.clone())
            .find(|x| x.0.contains(&driver.0.start) && x.0.contains(&(driver.0.end - 1)))?
            .0;
        // We know (see `register`) that the DriverRange in allocations is a valid DeviceRange. As
        // the backing memory will not be freed until IdentityDriverMem is dropped, we can safely
        // provide the lifetime `'a` on the range.
        let device = unsafe { DeviceRange::new(range) };
        // Now trim device down to the potential sub-range that was requested.
        let device = device.split_at(driver.0.start - device.get().start)?.1;
        Some(device.split_at(driver.len())?.0)
    }
}

impl IdentityDriverMem {
    /// Construct a new [`IdentityDriverMem`]
    pub fn new() -> IdentityDriverMem {
        IdentityDriverMem { allocations: Mutex::new(Vec::new()) }
    }

    // Helper method for localizing the reasoning on the allocations map.
    // # Safety
    //
    // Require that `alloc` be a valid `IdentityAlloc` and that range.start == alloc.base and
    // range.len() <= alloc.layout.size(). This ensures range can be safely re-interpreted as a
    // DeviceRange.
    unsafe fn register(&self, range: DriverRange, alloc: IdentityAlloc) {
        self.allocations.lock().push((range, alloc));
    }

    fn range_with_layout_size<'a>(
        &'a self,
        size_bytes: usize,
        layout: alloc::Layout,
    ) -> Option<DeviceRange<'a>> {
        // Validate that our desired length fits inside the amount we are going to allocate.
        if size_bytes > layout.size() {
            return None;
        }
        let alloc = IdentityAlloc::new(layout)?;
        let base = alloc.base();
        // The memory we provide to DeviceRange is valid as it just came from the allocator, and it
        // will remain valid until the [`IdentityAlloc`] is dropped and the memory freed, which does
        // not happen until this object is destroyed, and we have a borrow against this object of
        // the same lifetime as we provide in the DeviceRange.
        let range = unsafe { DeviceRange::new(base..(base + size_bytes)) };
        // register is safe to call since we provide a range that is pulled out of a valid
        // DeviceRange and the range is from the provided allocation.
        unsafe { self.register(DriverRange(range.get()), alloc) };
        Some(range)
    }

    /// Allocate with a specific [`alloc::Layout`]
    ///
    /// Specifying a particular [`alloc::Layout`] for the range allows an alignment to be requested,
    /// so that the underlying [`DeviceRange`] can be accessed directly as a desired object using
    /// [`DeviceRange::try_ptr`].
    ///
    /// The allocated range will be zeroed.
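    ///
    /// For example, a range suitable for direct access as a `u64` could be requested as
    /// (illustrative only):
    /// ```ignore
    /// let layout = std::alloc::Layout::new::<u64>();
    /// let range = identity_driver_mem.range_with_layout(layout).unwrap();
    /// // Succeeds because the allocation is aligned for a u64.
    /// let ptr: *mut u64 = range.try_mut_ptr().unwrap();
    /// ```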
    pub fn range_with_layout<'a>(&'a self, layout: alloc::Layout) -> Option<DeviceRange<'a>> {
        self.range_with_layout_size(layout.size(), layout)
    }

    /// Allocate a range to hold `size_bytes`
    ///
    /// The backing allocation will be aligned to match a [`u64`], but the [`DeviceRange`] reported
    /// by [`DeviceRange::get`] will be exactly `size_bytes` long.
    ///
    /// The allocated range will be zeroed.
    pub fn new_range<'a>(&'a self, size_bytes: usize) -> Option<DeviceRange<'a>> {
        if size_bytes == 0 {
            return None;
        }
        let layout = alloc::Layout::from_size_align(size_bytes, std::mem::align_of::<u64>())
            .ok()?
            .pad_to_align();
        self.range_with_layout_size(size_bytes, layout)
    }

    /// Allocates ranges to fill and return a [`QueueMemory`]
    pub fn alloc_queue_memory<'a>(&'a self, queue_size: u16) -> Option<QueueMemory<'a>> {
        let desc = self.new_range(std::mem::size_of::<ring::Desc>() * queue_size as usize)?;
        let avail = self.new_range(ring::Driver::avail_len_for_queue_size(queue_size))?;
        let used = self.new_range(ring::Device::used_len_for_queue_size(queue_size))?;
        Some(QueueMemory { desc, avail, used })
    }
}

/// Simulates driver side interactions of a queue for facilitating tests.
///
/// This is termed a `FakeQueue` as, whilst it can correctly function as a driver side queue
/// manager, its methods and internal bookkeeping are aimed at writing tests, not at being
/// efficient.
pub struct FakeQueue<'a> {
    device: ring::as_driver::Device<'a>,
    driver: ring::as_driver::Driver<'a>,
    queue_size: u16,
    next_desc: u16,
    next_avail: u16,
    next_used: u16,
    chains: HashMap<u16, Chain>,
}

impl<'a> FakeQueue<'a> {
    /// Construct a [`FakeQueue`] to act as driver for provided rings.
    ///
    /// Takes a [`ring::Driver`] and [`ring::Device`] and constructs a view to the same memory to
    /// act as the driver.
    ///
    /// This assumes the rings have been correctly initialized, which by the virtio
    /// specification means they have been zeroed.
    pub fn new(driver: &ring::Driver<'a>, device: &ring::Device<'a>) -> Option<Self> {
        let queue_size = driver.queue_size();
        if queue_size != device.queue_size() {
            return None;
        }
        let driver = ring::as_driver::Driver::new(driver);
        let device = ring::as_driver::Device::new(device);
        Some(FakeQueue {
            device,
            driver,
            queue_size,
            next_desc: 0,
            next_avail: 0,
            next_used: 0,
            chains: HashMap::new(),
        })
    }

    /// Attempt to publish a [`Chain`] into the ring via a single indirect descriptor.
    ///
    /// The chain's descriptors are written to an indirect descriptor table allocated from `mem`,
    /// and a single `VRING_DESC_F_INDIRECT` descriptor referencing that table is published.
    /// Returns the same values as [`publish`](#publish), or `None` on failure.
    pub fn publish_indirect(
        &mut self,
        chain: Chain,
        mem: &IdentityDriverMem,
    ) -> Option<(u16, u16)> {
        if chain.descriptors.len() == 0 {
            return None;
        }

        let indirect_range =
            mem.new_range(chain.descriptors.len() * std::mem::size_of::<ring::Desc>())?;

        let mut iter = chain.descriptors.iter().enumerate().peekable();
        while let Some((index, desc)) = iter.next() {
            let has_next = iter.peek().is_some();

            let write_flags =
                if desc.access == DescAccess::DeviceWrite { ring::VRING_DESC_F_WRITE } else { 0 };
            let next_flags = if has_next { ring::VRING_DESC_F_NEXT } else { 0 };
            let next_desc = if has_next { index as u16 + 1 } else { 0 };
            let ring_desc = ring::as_driver::make_desc(
                desc.data_addr,
                desc.data_len,
                write_flags | next_flags,
                next_desc,
            );

            let ptr = indirect_range.try_mut_ptr::<ring::Desc>()?;
            unsafe {
                std::ptr::copy_nonoverlapping::<ring::Desc>(
                    &ring_desc as *const ring::Desc,
                    ptr.add(index as usize),
                    1,
                )
            };
        }
        self.driver.write_desc(
            self.next_desc,
            ring::as_driver::make_desc(
                indirect_range.get().start as u64,
                indirect_range.len() as u32,
                VRING_DESC_F_INDIRECT,
                0,
            ),
        );
        self.update_index(chain, 1)
    }

    /// Attempt to publish a [`Chain`] into the ring.
    ///
    /// This returns `None` if the chain is of zero length, or if there are not enough free
    /// descriptors in the ring for the chain. Otherwise it returns the available index that was
    /// used and the index of the first descriptor in the chain.
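    ///
    /// A sketch of typical use in a test (illustrative only; `mem` and `fake_queue` are assumed to
    /// already exist):
    /// ```ignore
    /// let chain = Chain::with_lengths(&[8, 8], &[16], &mem);
    /// let (avail_idx, first_desc) = fake_queue.publish(chain).unwrap();
    /// ```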
    pub fn publish(&mut self, chain: Chain) -> Option<(u16, u16)> {
        if chain.descriptors.len() == 0 {
            return None;
        }
        // Need to validate that enough descriptors are available. We know next_desc is either the
        // start of a chain, or is free, as such we just need to walk forward and check if any
        // chains start in our desired descriptor range.
        let desc_count = chain.descriptors.len() as u16;
        // Use a loop to check so that we can wrap indexes in a clearly readable way.
        for offset in 0..desc_count {
            let index = self.next_desc.wrapping_add(offset) % self.queue_size;
            if self.chains.get(&index).is_some() {
                return None;
            }
        }
        // Write descriptors
        let mut iter = chain.descriptors.iter().enumerate().peekable();
        while let Some((index, desc)) = iter.next() {
            let has_next = iter.peek().is_some();
            let ring_index = self.next_desc.wrapping_add(index as u16) % self.queue_size;
            let write_flags =
                if desc.access == DescAccess::DeviceWrite { ring::VRING_DESC_F_WRITE } else { 0 };
            let next_flags = if has_next { ring::VRING_DESC_F_NEXT } else { 0 };
            // If a specific next descriptor was supplied by the chain then use it, otherwise
            // calculate the actual next index we will insert.
            let next_desc = if has_next {
                desc.next.unwrap_or_else(|| ring_index.wrapping_add(1) % self.queue_size)
            } else {
                0
            };
            self.driver.write_desc(
                ring_index,
                ring::as_driver::make_desc(
                    desc.data_addr,
                    desc.data_len,
                    write_flags | next_flags,
                    next_desc,
                ),
            );
        }
        self.update_index(chain, desc_count)
    }

    fn update_index(&mut self, chain: Chain, desc_count: u16) -> Option<(u16, u16)> {
        // Mark the start of the descriptor chain.
        let first_desc = self.next_desc % self.queue_size;
        let avail_index = self.next_avail;
        self.driver.write_avail(avail_index, first_desc);
        // Available index is monotonically increasing and does not wrap at queue_size.
        self.next_avail = self.next_avail.wrapping_add(1);
        // Signal it as existing.
        self.driver.write_idx(self.next_avail);
        // Record the index we should start allocating descriptors from next time. This range may
        // or may not be free right now.
        self.next_desc = self.next_desc.wrapping_add(desc_count) % self.queue_size;
        // Store the chain in our map so we can return it in next_used.
        self.chains.insert(first_desc, chain);
        // Return the avail index we used and where the descriptor chain starts.
        Some((avail_index, first_desc))
    }

    /// Retrieve the next returned chain, if any.
    ///
    /// If a chain has been returned by the device return a [`UsedChain`], otherwise `None`.
    pub fn next_used(&mut self) -> Option<UsedChain> {
        // Check if the device has returned anything.
        if self.device.read_idx() == self.next_used {
            return None;
        }
        // Read out the chain that was returned.
        let (id, written) =
            ring::as_driver::deconstruct_used(self.device.read_used(self.next_used));
        // Expect something in the next slot next time.
        self.next_used = self.next_used.wrapping_add(1);
        // Remove the chain from our internal map and return it.
        self.chains.remove(&(id as u16)).map(|chain| UsedChain { written, chain })
    }
}

/// Represents a chain returned by a device.
pub struct UsedChain {
    written: u32,
    chain: Chain,
}

impl UsedChain {
    /// Get the amount of data written to the chain.
    ///
    /// This is the amount of data the device claimed it wrote to the chain and could be incorrect;
    /// for example, some devices do not zero this field when they return a read-only chain.
    pub fn written(&self) -> u32 {
        self.written
    }

    /// Iterate over any written portions.
    ///
    /// Iterates over the writable portion of the descriptor chain, up to the amount that was
    /// claimed to be [`written`](#written). The iterator produces
    /// `(driver_addr as u64, length as u32)` tuples and it is the responsibility of the caller to
    /// know if this range is valid and how to access it.
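    ///
    /// A sketch of inspecting the returned data (illustrative only):
    /// ```ignore
    /// for (addr, len) in used_chain.data_iter() {
    ///     // `addr` is a driver address; with an [`IdentityDriverMem`] it is also directly
    ///     // addressable in the test process.
    /// }
    /// ```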
    pub fn data_iter<'a>(&'a self) -> ChainDataIter<'a> {
        ChainDataIter { next: Some(0), remaining: self.written, chain: &self.chain }
    }
}

/// Iterator for the data in a [`UsedChain`]
pub struct ChainDataIter<'a> {
    next: Option<usize>,
    remaining: u32,
    chain: &'a Chain,
}

impl<'a> Iterator for ChainDataIter<'a> {
    type Item = (u64, u32);

    fn next(&mut self) -> Option<Self::Item> {
        if self.remaining == 0 {
            return None;
        }
        let next = self.next.take()?;
        // Walk the descriptors till we find a writable one.
        let (index, desc) = self
            .chain
            .descriptors
            .iter()
            .enumerate()
            .skip(next)
            .find(|(_, desc)| desc.access == DescAccess::DeviceWrite)?;
        self.next = Some(index + 1);
        let size = std::cmp::min(self.remaining, desc.data_len);
        self.remaining = self.remaining - size;
        Some((desc.data_addr, size))
    }
}

struct DescriptorInfo {
    access: DescAccess,
    data_addr: u64,
    data_len: u32,
    next: Option<u16>,
}

/// Descriptor chain that can be published in a [`FakeQueue`]
pub struct Chain {
    descriptors: Vec<DescriptorInfo>,
}

impl Chain {
    /// Build a descriptor chain with zeroed readable and writable descriptors.
    ///
    /// For every value in the `readable` and `writable` slices provided, allocates a descriptor of
    /// that many bytes in the descriptor chain.
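    ///
    /// For example, the following would build a chain with two 16 byte readable descriptors
    /// followed by one 64 byte writable descriptor (illustrative only):
    /// ```ignore
    /// let chain = Chain::with_lengths(&[16, 16], &[64], &mem);
    /// ```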
    pub fn with_lengths(readable: &[u32], writable: &[u32], mem: &IdentityDriverMem) -> Self {
        let builder = readable
            .iter()
            .cloned()
            .fold(ChainBuilder::new(), |build, range| build.readable_zeroed(range, mem));
        writable.iter().cloned().fold(builder, |build, range| build.writable(range, mem)).build()
    }

    /// Build a descriptor chain providing data for readable descriptors.
    ///
    /// Similar to [`with_lengths`](#with_lengths) except the readable descriptors are populated
    /// with a copy of the provided data slice instead.
    pub fn with_data<T: Copy>(
        readable: &[&[T]],
        writable: &[u32],
        mem: &IdentityDriverMem,
    ) -> Self {
        let builder = readable
            .iter()
            .cloned()
            .fold(ChainBuilder::new(), |build, range| build.readable(range, mem));
        writable.iter().cloned().fold(builder, |build, range| build.writable(range, mem)).build()
    }

    /// Build a descriptor chain with raw data references.
    ///
    /// Does not allocate data for any descriptors; instead it puts the provided address and length
    /// directly into the final descriptor. This is intentionally designed to allow you to build
    /// corrupt and invalid descriptor chains for the purposes of testing.
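    ///
    /// For example, a chain claiming a writable descriptor at an address the device cannot
    /// translate could be built as (illustrative only):
    /// ```ignore
    /// let chain = Chain::with_exact_data(&[(DescAccess::DeviceWrite, 0xdead_beef, 64)]);
    /// ```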
    pub fn with_exact_data(chain: &[(ring::DescAccess, u64, u32)]) -> Self {
        chain
            .iter()
            .fold(ChainBuilder::new(), |builder, (writable, addr, len)| {
                builder.reference(*writable, *addr, *len)
            })
            .build()
    }
}

/// Builder for a [`Chain`]
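///
/// Descriptors are appended in the order the builder methods are called. A minimal sketch
/// (illustrative only):
///
/// ```ignore
/// let chain = ChainBuilder::new()
///     .readable(&[1u8, 2, 3, 4], &mem)
///     .writable(32, &mem)
///     .build();
/// ```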
pub struct ChainBuilder(Chain);

impl ChainBuilder {
    /// Construct a new [`ChainBuilder`]
    pub fn new() -> Self {
        ChainBuilder(Chain { descriptors: Vec::new() })
    }

    /// Amend the last descriptor added with a specific next value.
    ///
    /// By default the next field in the published [`ring::Desc`] will be set automatically by the
    /// [`FakeQueue`] when publishing the chain, since the [`FakeQueue`] is the one allocating the
    /// actual descriptor ring slots.
    ///
    /// For testing this can be used to override the next field that [`FakeQueue::publish`] will
    /// generate and is intended for creating broken descriptor chains. It is not intended that
    /// this be used to produce a properly functioning chain and queue.
    ///
    /// # Panics
    ///
    /// Will panic if no descriptor has yet been added to the chain.
    pub fn amend_next(mut self, next: u16) -> Self {
        self.0.descriptors.last_mut().unwrap().next = Some(next);
        self
    }

    /// Append a new readable descriptor with a copy of `data`
    ///
    /// # Panics
    ///
    /// Will panic if there is not enough memory to allocate a buffer to hold `data`.
    pub fn readable<T: Copy>(mut self, data: &[T], mem: &IdentityDriverMem) -> Self {
        let layout = alloc::Layout::for_value(data);
        let mem = mem.range_with_layout(layout).unwrap();
        // This usage of copy_nonoverlapping is valid since
        //  * src region is from a slice reference and can be assumed to be valid and correctly aligned
        //  * dst region produced from [`DeviceRange`] is defined to be valid as long as
        //    the DeviceRange is held alive, which it is over the duration of the unsafe block.
        //  * dst region is known to be correctly aligned as it was constructed aligned, and
        //    try_mut_ptr only returns validly aligned pointers.
        //  * src and dst do not overlap as the [`DeviceRange`] is valid, and valid device ranges do
        //    not overlap with other Rust objects.
        unsafe {
            std::ptr::copy_nonoverlapping::<T>(
                data.as_ptr(),
                // unwrap cannot fail since we allocated with alignment of T.
                mem.try_mut_ptr::<T>().unwrap(),
                data.len(),
            )
        };
        self.0.descriptors.push(DescriptorInfo {
            access: DescAccess::DeviceRead,
            data_addr: mem.get().start as u64,
            data_len: layout.size() as u32,
            next: None,
        });
        self
    }

    /// Append an empty descriptor of the specified type and length
    ///
    /// # Panics
    ///
    /// Will panic if there is not enough memory to allocate a buffer of length `data_len`.
    pub fn zeroed(mut self, access: DescAccess, data_len: u32, mem: &IdentityDriverMem) -> Self {
        let mem = mem.new_range(data_len as usize).unwrap();
        self.0.descriptors.push(DescriptorInfo {
            access,
            data_addr: mem.get().start as u64,
            data_len,
            next: None,
        });
        self
    }

    /// Append a descriptor with raw data.
    ///
    /// This does not perform any allocations and will pass through the exact `data_addr` and
    /// `data_len` provided.
    pub fn reference(mut self, access: DescAccess, data_addr: u64, data_len: u32) -> Self {
        self.0.descriptors.push(DescriptorInfo { access, data_addr, data_len, next: None });
        self
    }

    /// Append a readable descriptor of `len` zeroed bytes. Shorthand for [`zeroed`](#zeroed).
    pub fn readable_zeroed(self, len: u32, mem: &IdentityDriverMem) -> Self {
        self.zeroed(DescAccess::DeviceRead, len, mem)
    }

    /// Append a readable descriptor referencing raw data. Shorthand for [`reference`](#reference).
    pub fn readable_reference(self, addr: u64, len: u32) -> Self {
        self.reference(DescAccess::DeviceRead, addr, len)
    }

    /// Append a writable descriptor of `len` zeroed bytes. Shorthand for [`zeroed`](#zeroed).
    pub fn writable(self, len: u32, mem: &IdentityDriverMem) -> Self {
        self.zeroed(DescAccess::DeviceWrite, len, mem)
    }

    /// Append a writable descriptor referencing raw data. Shorthand for [`reference`](#reference).
    pub fn writable_reference(self, addr: u64, len: u32) -> Self {
        self.reference(DescAccess::DeviceWrite, addr, len)
    }

    /// Consume the builder and produce a [`Chain`].
    pub fn build(self) -> Chain {
        self.0
    }
}

/// Wraps common state needed for writing test code with a [`FakeQueue`].
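///
/// A minimal construction sketch (illustrative only):
///
/// ```ignore
/// let mem = IdentityDriverMem::new();
/// let test = TestQueue::new(32, &mem);
/// // Drive the rings from the test via `test.fake_queue`, hand `test.queue` to the device under
/// // test, and observe notifications via `test.notify`.
/// ```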
pub struct TestQueue<'a> {
    pub queue: Queue<'a, NotificationCounter>,
    pub notify: NotificationCounter,
    pub fake_queue: FakeQueue<'a>,
}

impl<'a> TestQueue<'a> {
    /// Allocates a [`Queue`] and [`FakeQueue`] for unit tests.
    pub fn new(size: usize, mem: &'a IdentityDriverMem) -> Self {
        let mem = mem.alloc_queue_memory(size as u16).unwrap();
        let notify = NotificationCounter::new();
        let driver = ring::Driver::new(mem.desc.clone(), mem.avail.clone()).unwrap();
        let device = ring::Device::new(mem.used.clone()).unwrap();

        let fake_queue = FakeQueue::new(&driver, &device).unwrap();
        let queue = Queue::new_from_rings(driver, device, notify.clone()).unwrap();
        TestQueue { queue, notify, fake_queue }
    }
}