virtio_device/ring.rs
// Copyright 2021 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

//! Minimal type-safe definitions of the virtio data structures.
//!
//! Contains definitions and type-safe accessors and manipulators of the virtio data structures.
//! For the leaf data structures like [descriptors](Desc) these definitions are simply the in
//! memory layout as a Rust `struct`.
//!
//! Unfortunately the virtqueues are a variable sized data structure, whose length is not known
//! until run time as the size is determined by the driver. Representing the virtqueue as 'just' a
//! Rust `struct` is therefore not possible.
//!
//! Two structs are used for the representation, as this allows the [`Device`] owned and
//! [`Driver`] owned portions of the virtqueue to be separated, each with its correct mutability.
//!
//! Due to the split into the [`Driver`] and [`Device`] structs there is no specifically named
//! `virtqueue` in this module. The [Queue](crate::queue::Queue) builds on the [`Driver`] and
//! [`Device`] to provide useful virtqueue functionality.
//!
//! These abstractions are intended to be type-safe, but do not enforce correct implementation of
//! the virtio protocols. As such, reading the
//! [virtio specification](https://docs.oasis-open.org/virtio/virtio/v1.1/csprd01/virtio-v1.1-csprd01.html)
//! is required to correctly use this module. Most likely you do not want to use these directly and
//! instead want the higher level [`queue`](crate::queue) and [`chain`](crate::chain) modules that
//! provide easier to use wrappers.
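//!
//! As a rough sketch of how the two halves are intended to pair up (the `desc_range`,
//! `avail_range` and `used_range` names here are hypothetical [`DeviceRange`]s, sized via
//! [`Driver::avail_len_for_queue_size`] and [`Device::used_len_for_queue_size`]; how they are
//! obtained is transport specific):
//!
//! ```ignore
//! let driver = Driver::new(desc_range, avail_range).expect("invalid descriptor/avail memory");
//! let mut device = Device::new(used_range).expect("invalid used memory");
//!
//! // Poll for a chain published at avail index 0, then return it to the driver.
//! if let Some(first_desc) = driver.get_avail(0) {
//!     // ... walk the chain via driver.get_desc and process it ...
//!     device.insert_used(Used::new(first_desc, 0), 0);
//!     device.publish_used(1);
//! }
//! ```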

use crate::mem::DeviceRange;
use std::marker::PhantomData;
use std::mem;
use std::sync::atomic;

/// Descriptor has a next field.
pub const VRING_DESC_F_NEXT: u16 = 1 << 0;
/// Descriptor is device write-only (otherwise device read-only).
pub const VRING_DESC_F_WRITE: u16 = 1 << 1;
/// Descriptor contains a list of buffer descriptors.
pub const VRING_DESC_F_INDIRECT: u16 = 1 << 2;

/// Describes descriptor access direction.
///
/// Any given descriptor is either device read only or device write only.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum DescAccess {
    DeviceRead,
    DeviceWrite,
}

/// Virtio descriptor data structure
///
/// Represents the in memory format of virtio descriptors and provides some accessors.
#[repr(C)]
#[derive(Debug, Clone, Copy)]
pub struct Desc {
    addr: u64,
    len: u32,
    // This is not bitflags! as it may contain additional bits that we do not define
    // and so would violate the bitflags type safety.
    flags: u16,
    next: u16,
}

impl Desc {
    /// Returns whether the [next](VRING_DESC_F_NEXT) bit is set.
    ///
    /// Typically the [next](#next) method is preferred.
    pub fn has_next(&self) -> bool {
        self.flags & VRING_DESC_F_NEXT != 0
    }

    /// Returns whether the [indirect](VRING_DESC_F_INDIRECT) bit is set.
    pub fn is_indirect(&self) -> bool {
        self.flags & VRING_DESC_F_INDIRECT != 0
    }

    /// Returns whether the [write](VRING_DESC_F_WRITE) bit is set.
    ///
    /// This flag should be ignored when [is_indirect](#is_indirect) is true.
    pub fn write_only(&self) -> bool {
        self.flags & VRING_DESC_F_WRITE != 0
    }

    /// Returns the descriptor access type.
    ///
    /// This is a convenience wrapper around [write_only](#write_only) to provide a safer type.
    pub fn access_type(&self) -> DescAccess {
        if self.write_only() {
            DescAccess::DeviceWrite
        } else {
            DescAccess::DeviceRead
        }
    }

    /// Returns the index of the next descriptor if there is one, otherwise `None`.
    pub fn next(&self) -> Option<u16> {
        if self.has_next() {
            Some(self.next)
        } else {
            None
        }
    }

    /// Returns the guest (address, length) pair representing the contents of this descriptor.
    ///
    /// No validation of the address and length is performed. In particular the range could be
    /// invalid or wrap.
    pub fn data(&self) -> (u64, u32) {
        (self.addr, self.len)
    }
}
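
// As an illustrative sketch of consuming a fetched descriptor (hypothetical device code; `desc`
// would come from `Driver::get_desc` and address translation is the caller's responsibility):
//
//     match desc.access_type() {
//         DescAccess::DeviceRead => {
//             let (addr, len) = desc.data();
//             // Translate `addr` and read up to `len` bytes of driver-supplied data.
//         }
//         DescAccess::DeviceWrite => {
//             let (addr, len) = desc.data();
//             // Translate `addr` and write device output, reporting bytes written via `Used`.
//         }
//     }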

/// Represents the layout of a virtio header
///
/// Due to the need to access the header fields through raw pointers this struct is never directly
/// used, however we define it so that we can take the `size_of` of it, and to make the translation
/// to our manual offsets more obvious.
#[repr(C)]
struct HeaderLayout {
    _flags: u16,
    _idx: u16,
}

impl HeaderLayout {
    // Define the offset of the two fields in the header layout. These offsets will be used to add
    // to u16 pointers.
    const FLAGS_OFFSET: usize = 0;
    const IDX_OFFSET: usize = 1;
}

/// Wrapper around accessing a virtio header
///
/// For safety the members of the virtio header must be individually read and written using
/// volatile accesses through a raw pointer, and we cannot construct a regular `&HeaderLayout`.
/// Therefore this object wraps a raw pointer and provides safe accesses to the header fields.
//
// # Safety
//
// `base` must always be a non-null pointer that points to an array of two u16 values (i.e. it
// must point to a HeaderLayout), that can be read and written from. This pointer must be known to
// be valid for the lifetime 'a, making it valid for at least the lifetime of this object.
#[derive(Clone)]
struct Header<'a> {
    base: *mut u16,
    lifetime: PhantomData<&'a ()>,
}

impl<'a> Header<'a> {
    /// Construct a [`Header`] wrapping the given [`HeaderLayout`]
    ///
    /// # Safety
    ///
    /// Behavior is undefined if:
    /// - `layout` is not valid for reads or writes
    /// - `layout` is not correctly aligned
    /// - `layout` does not point to an object that lives for at least the lifetime `'a`
    unsafe fn from_layout(layout: *mut HeaderLayout) -> Self {
        // If layout is a valid pointer to a HeaderLayout, then it is also a valid pointer to an
        // array of two u16 values, which is why we can do this cast and perform the offsetting
        // that we do in `flags()` and `idx()`.
        Header { base: layout.cast(), lifetime: PhantomData }
    }

    // The returned pointer is guaranteed to be correctly aligned and valid for reads and writes.
    fn flags(&self) -> *mut u16 {
        // From the requirements in from_layout, base is a valid pointer to a HeaderLayout, and so
        // offsetting it to the flags field must result in a valid pointer.
        unsafe { self.base.add(HeaderLayout::FLAGS_OFFSET) }
    }

    // The returned pointer is guaranteed to be correctly aligned and valid for reads and writes.
    fn idx(&self) -> *mut u16 {
        // From the requirements in from_layout, base is a valid pointer to a HeaderLayout, and so
        // offsetting it to the idx field must result in a valid pointer.
        unsafe { self.base.add(HeaderLayout::IDX_OFFSET) }
    }

    fn are_notifications_suppressed(&self) -> bool {
        // flags() is guaranteed to return a pointer that is aligned and valid for reading
        unsafe { self.flags().read_volatile() == 1 }
    }

    fn load_idx(&self) -> u16 {
        // idx() is guaranteed to return a pointer that is aligned and valid for reading
        let result = unsafe { self.idx().read_volatile() };
        atomic::fence(atomic::Ordering::Acquire);
        result
    }

    fn store_idx(&self, idx: u16) {
        atomic::fence(atomic::Ordering::Release);
        // idx() is guaranteed to return a pointer that is aligned and valid for writing
        unsafe { self.idx().write_volatile(idx) };
    }

    /// Changes flags to suppress notifications.
    ///
    /// Not permitted if the VIRTIO_F_EVENT_IDX feature was negotiated.
    /// This is not yet exposed for use.
    #[allow(dead_code)]
    fn suppress_notifications(&self) {
        // flags() is guaranteed to return a pointer that is aligned and valid for writing
        unsafe { self.flags().write_volatile(1) };
    }

    /// Changes flags to enable notifications.
    fn enable_notifications(&self) {
        // flags() is guaranteed to return a pointer that is aligned and valid for writing
        unsafe { self.flags().write_volatile(0) };
    }
}

/// Representation of driver owned data.
///
/// Provides methods for safely querying items published by the driver, using appropriate memory
/// barriers.
///
/// Contents of this `struct` are not expected to be modified in parallel by a driver in a guest,
/// but as there is no way to guarantee guest behavior it is designed under the assumption of
/// parallel modifications by a malicious guest.
//
// # Safety
//
// The pointers `desc` and `avail` are created and validated in [`new`](#new) to point to ranges of
// memory that have at least `queue_size` valid objects in them, and are otherwise correctly
// aligned and valid to read from. `used_event_index` must be an aligned pointer that can be read
// from.
//
// All of the objects pointed to by `desc`, `avail` and `used_event_index` must remain valid for
// the lifetime `'a`. It is the job of [`new`](#new) to take a [`DeviceRange`] and construct valid
// pointers, and they, along with `queue_size`, should never be changed.
//
// The pointers are marked mutable so as to allow the `as_driver::Driver` to be implemented,
// although the regular device implementation does not expose any way to perform writes.
// `as_driver::Driver` has its own safety discussion.
pub struct Driver<'a> {
    desc: *mut Desc,
    queue_size: u16,
    avail_header: Header<'a>,
    avail: *mut u16,
    used_event_index: *mut u16,
}

impl<'a> Driver<'a> {
    /// How many bytes the avail ring should be for the given `queue_size`.
    ///
    /// Provides an easy way to calculate the correct size of the range for passing to
    /// [`new`](#new).
    pub const fn avail_len_for_queue_size(queue_size: u16) -> usize {
        mem::size_of::<HeaderLayout>() + mem::size_of::<u16>() * (queue_size as usize + 1)
    }
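
    // For a concrete sense of scale: the 4 byte header (flags + idx), one u16 ring entry per
    // descriptor and the trailing u16 used_event field mean, for example:
    //
    //     assert_eq!(Driver::avail_len_for_queue_size(8), 4 + 2 * (8 + 1));     // 22 bytes
    //     assert_eq!(Driver::avail_len_for_queue_size(256), 4 + 2 * (256 + 1)); // 518 bytes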

    /// Construct a [`Driver`] using the provided memory for descriptor and available rings.
    ///
    /// Provided ranges must be correctly sized and aligned to represent the same power of two
    /// queue size, otherwise a `None` is returned.
    pub fn new<'b: 'a, 'c: 'a>(desc: DeviceRange<'b>, avail: DeviceRange<'c>) -> Option<Self> {
        let queue_size = desc.len() / std::mem::size_of::<Desc>();
        if !queue_size.is_power_of_two() {
            return None;
        }
        let queue_size16: u16 = queue_size.try_into().ok()?;
        // Here we calculated queue_size based on the length of desc, so we know that desc points
        // to at least queue_size valid objects.
        let desc = desc.try_mut_ptr()?;

        let (avail_header, rest) = avail.split_at(mem::size_of::<HeaderLayout>())?;
        // from_layout requires that the pointer we give it is correctly aligned, sized and lives
        // long enough. try_mut_ptr will only return a Some() if avail_header is aligned and at
        // least large enough for there to be a HeaderLayout. We also know that avail_header is
        // valid for at least our lifetime of `'a`.
        let avail_header = unsafe { Header::from_layout(avail_header.try_mut_ptr()?) };

        // Reinterpret the rest as a [u16], with the last one being the used_event_index.
        if rest.len() != mem::size_of::<u16>() * (queue_size + 1) {
            return None;
        }

        let avail: *mut u16 = rest.try_mut_ptr()?;
        // We know that avail is an aligned pointer, as otherwise rest.try_mut_ptr() would have
        // returned a `None`, and the size of avail was just validated above to hold queue_size+1
        // items.
        let used_event_index = unsafe { avail.add(queue_size) };
        // Building the final struct we know that our pointers desc, avail and used_event_index
        // all point to sufficiently large objects for our queue_size16 and are aligned. As they
        // were derived from DeviceRanges that have a lifetime of at least `'a`, we have fulfilled
        // all the invariants defined on the struct.
        Some(Self { desc, queue_size: queue_size16, avail_header, avail, used_event_index })
    }

    /// Query if a descriptor chain has been published with the given index.
    ///
    /// If a chain has been published by the driver then returns the index of the first descriptor
    /// in the chain. Otherwise returns `None`.
    pub fn get_avail(&self, next_index: u16) -> Option<u16> {
        if next_index != self.avail_header.load_idx() {
            // The struct level invariants on `avail` and `queue_size` guarantee that this offset
            // produces a readable value.
            Some(unsafe { self.avail.add((next_index % self.queue_size).into()).read_volatile() })
        } else {
            None
        }
    }
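
    // Illustrative sketch: `next_index` is free running, so a hypothetical device loop drains
    // newly published chains like this (the `% queue_size` wrapping is handled internally above,
    // and the index itself wraps only at the u16 limit):
    //
    //     let mut next_index: u16 = 0;
    //     while let Some(first_desc) = driver.get_avail(next_index) {
    //         // ... process the chain starting at descriptor first_desc ...
    //         next_index = next_index.wrapping_add(1);
    //     }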

    /// Request a descriptor by index.
    ///
    /// Returns `None` if the requested index is not within the range of the ring. Beyond this
    /// check this method has no way to validate that the requested descriptor is valid; it is the
    /// responsibility of the caller to know this.
    pub fn get_desc(&self, index: u16) -> Option<Desc> {
        if index < self.queue_size {
            // The struct level invariants on `desc` and `queue_size` guarantee that this offset
            // produces a readable value.
            Some(unsafe { self.desc.add(index.into()).read_volatile() })
        } else {
            None
        }
    }
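
    // A minimal sketch of walking a chain, assuming `first_desc` was returned by `get_avail`
    // (illustrative only; a buggy or malicious driver can form a cycle, so real code should
    // bound the number of steps):
    //
    //     let mut index = first_desc;
    //     loop {
    //         let desc = driver.get_desc(index).expect("driver published an invalid index");
    //         // ... use desc.access_type() and desc.data() ...
    //         match desc.next() {
    //             Some(next) => index = next,
    //             None => break,
    //         }
    //     }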

    /// Determines if the driver has requested a notification for the given descriptor submission.
    ///
    /// Queries the information published by the driver to determine whether or not it would like a
    /// notification for the given `submitted` descriptor by the [`Device`]. As the [`Driver`]
    /// holds no host state, whether the `VIRTIO_F_EVENT_IDX` feature was negotiated must be
    /// passed in.
    pub fn needs_notification(&self, feature_event_idx: bool, submitted: u16) -> bool {
        if feature_event_idx {
            // The struct level invariant on `used_event_index` guarantees that this is readable.
            submitted == unsafe { self.used_event_index.read_volatile() }
        } else {
            !self.avail_header.are_notifications_suppressed()
        }
    }
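
    // Sketch of the expected call pattern when returning a chain (hypothetical; the actual
    // notification, e.g. interrupt injection, is a transport specific detail):
    //
    //     device.insert_used(Used::new(first_desc, written), used_index);
    //     device.publish_used(used_index.wrapping_add(1));
    //     if driver.needs_notification(event_idx_negotiated, used_index) {
    //         // notify the guest via the transport
    //     }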

    /// Returns the size of the descriptor and available rings.
    ///
    /// The descriptor and available rings are, by definition, the same size. This is just
    /// returning the size that was calculated during [`new`](#new).
    pub fn queue_size(&self) -> u16 {
        self.queue_size
    }
}

/// Representation of an entry in the used ring.
///
/// The only purpose [`Used`] has is to be passed to [insert_used](Device::insert_used) to be
/// copied into the used ring. As a result the only provided method is [new](Used::new) and there
/// are no accessors, as the driver is the one who will be accessing it.
#[repr(C)]
#[derive(Debug, Clone, Copy)]
pub struct Used {
    /// Index of start of used descriptor chain.
    ///
    /// For padding reasons the spec makes `id` in this structure 32 bits, although it will never
    /// exceed an actual 16-bit descriptor index.
    id: u32,

    /// Total length of the descriptor chain which was used (written to), in bytes.
    len: u32,
}

impl Used {
    /// Constructs a new used entry.
    ///
    /// `id` is the index of the first descriptor in the chain being returned and `len` is the
    /// total number of bytes written to any writable descriptors in the chain.
    pub fn new(id: u16, len: u32) -> Used {
        Used { id: id.into(), len }
    }
}

/// Represents the device owned data.
///
/// Contents of this struct are expected to be modified by the device and so are mutable. Provided
/// methods allow for safely publishing data to the driver using appropriate memory barriers.
///
/// Although only the device is supposed to be modifying this data it is designed to account for a
/// malicious guest performing modifications in parallel.
//
// # Safety
//
// The pointer `used` is created and validated in [`new`](#new) to point to a range of memory that
// has at least `queue_size` valid objects in it, and is otherwise correctly aligned and valid to
// write to. `avail_event_index` must be an aligned pointer that can be written.
//
// All of the objects pointed to by `used` and `avail_event_index` must remain valid for the
// lifetime `'a`. It is the job of [`new`](#new) to take a [`DeviceRange`] and construct valid
// pointers, and they, along with `queue_size`, should never be changed.
pub struct Device<'a> {
    queue_size: u16,
    used_header: Header<'a>,
    used: *mut Used,

    // Notification suppression is not yet exposed for use.
    #[allow(dead_code)]
    avail_event_index: *mut u16,
}

impl<'a> Device<'a> {
    /// How many bytes the used ring should be for the given `queue_size`.
    ///
    /// Provides an easy way to calculate the correct size of the range for passing to
    /// [`new`](#new).
    pub const fn used_len_for_queue_size(queue_size: u16) -> usize {
        mem::size_of::<HeaderLayout>()
            + mem::size_of::<Used>() * queue_size as usize
            + mem::size_of::<u16>()
    }
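
    // For a concrete sense of scale: the 4 byte header, one 8 byte [`Used`] entry per descriptor
    // and the trailing u16 avail_event field mean, for example:
    //
    //     assert_eq!(Device::used_len_for_queue_size(8), 4 + 8 * 8 + 2);     // 70 bytes
    //     assert_eq!(Device::used_len_for_queue_size(256), 4 + 8 * 256 + 2); // 2054 bytes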

    /// Construct a [`Device`] using the provided memory for the used ring.
    ///
    /// Provided range must be correctly sized and aligned to represent a power of two queue size,
    /// otherwise a `None` is returned.
    pub fn new<'b: 'a>(used: DeviceRange<'b>) -> Option<Self> {
        let (used_header, rest) = used.split_at(mem::size_of::<HeaderLayout>())?;
        // from_layout requires that the pointer we give it is correctly aligned, sized and lives
        // long enough. try_mut_ptr will only return a Some() if used_header is aligned and at
        // least large enough for there to be a HeaderLayout. We also know that used_header is
        // valid for at least our lifetime of `'a`.
        let used_header = unsafe { Header::from_layout(used_header.try_mut_ptr()?) };

        // Take the last u16 from what is remaining as avail_event_index.
        if rest.len() < mem::size_of::<u16>() {
            return None;
        }

        let queue_size = (rest.len() - mem::size_of::<u16>()) / mem::size_of::<Used>();
        if !queue_size.is_power_of_two() {
            return None;
        }
        let queue_size16: u16 = queue_size.try_into().ok()?;

        let used: *mut Used = rest.try_mut_ptr()?;

        // We know that used is an aligned pointer, as otherwise rest.try_mut_ptr() would have
        // returned a `None`, and the size of used was just validated above to hold queue_size
        // entries plus the trailing avail_event_index.
        let avail_event_index = unsafe { used.add(queue_size).cast() };

        // Start with notifications from the driver enabled by default.
        used_header.enable_notifications();

        // Building the final struct we know that our pointers used and avail_event_index both
        // point to sufficiently large objects for our queue_size16 and are aligned. As they
        // were derived from DeviceRanges that have a lifetime of at least `'a`, we have fulfilled
        // all the invariants defined on the struct.
        Some(Self { queue_size: queue_size16, used_header, used, avail_event_index })
    }

    /// Returns the size of the used ring.
    pub fn queue_size(&self) -> u16 {
        self.queue_size
    }

    /// Add a descriptor chain to the used ring.
    ///
    /// After calling this the descriptor is not yet visible to the driver. To make it visible
    /// [`publish_used`](#publish_used) must be called. Chains are not implicitly published to
    /// allow for batching the return of chains.
    ///
    /// To allow for passing the same `index` between this and [`publish_used`](#publish_used),
    /// `index` here will automatically be wrapped to the queue length.
    pub fn insert_used(&mut self, used: Used, index: u16) {
        // The struct level invariants on `used` and `queue_size` guarantee that this offset
        // produces a writable value.
        unsafe { self.used.add((index % self.queue_size).into()).write_volatile(used) };
    }

    /// Publish the used ring up to the provided `index` to the driver.
    ///
    /// This updates the driver visible index and performs appropriate memory barriers for the
    /// driver to see any returned descriptors. It does not perform any kind of asynchronous
    /// notification, such as an interrupt injection, to the guest or driver as that is a virtio
    /// transport specific detail and is the responsibility of the caller.
    ///
    /// Note that indices should not be wrapped by the caller to the queue length as they are
    /// supposed to be free running and only wrap at the `u16` limit.
    pub fn publish_used(&mut self, index: u16) {
        self.used_header.store_idx(index);
    }
}
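
// A minimal sketch of batching chain returns with insert_used/publish_used, mirroring the test
// below (`first_desc0`/`first_desc1` are hypothetical chain heads and `used_index` is the
// device's free running used index):
//
//     device.insert_used(Used::new(first_desc1, 16), used_index);
//     device.insert_used(Used::new(first_desc0, 0), used_index.wrapping_add(1));
//     // A single release-fenced store of the new index makes both entries visible.
//     device.publish_used(used_index.wrapping_add(2));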

/// Driver side access to rings for writing tests
///
/// This module provides helpers to access rings from the side of the driver, and not the device,
/// which inverts the expectations on reading and writing. Provided here to reuse the [`Driver`]
/// and [`Device`] definitions, and is only intended for consumption by the
/// [`fake_queue`](crate::fake_queue).
///
/// The helpers provided here are extremely minimal and low-level, and aim to be the bare minimum
/// to simulate the driver side of ring interactions for the purpose of writing unit tests.
pub(crate) mod as_driver {
    use std::sync::atomic;

    pub struct Device<'a>(super::Device<'a>);

    impl<'a> Device<'a> {
        pub fn new<'b: 'a>(device: &super::Device<'b>) -> Self {
            // In constructing a new super::Device we have not broken any invariants on the
            // original as we do not change any of the pointers or sizes, and ensure the original
            // has at least as long a lifetime.
            Self(super::Device {
                queue_size: device.queue_size,
                used_header: device.used_header.clone(),
                used: device.used,
                avail_event_index: device.avail_event_index,
            })
        }

        pub fn read_idx(&self) -> u16 {
            // Header::idx() is defined to always produce a pointer that may be read.
            let result = unsafe { self.0.used_header.idx().read_volatile() };
            atomic::fence(atomic::Ordering::Acquire);
            result
        }

        pub fn read_used(&self, index: u16) -> super::Used {
            // The struct invariants on super::Device guarantee this offset is valid and readable.
            unsafe { self.0.used.add((index % self.0.queue_size).into()).read_volatile() }
        }
    }

    pub struct Driver<'a>(super::Driver<'a>);

    impl<'a> Driver<'a> {
        pub fn new<'b: 'a>(driver: &super::Driver<'b>) -> Self {
            // In constructing a new super::Driver we have not broken any invariants on the
            // original as we do not change any of the pointers or sizes, and ensure the original
            // has at least as long a lifetime.
            Self(super::Driver {
                desc: driver.desc,
                queue_size: driver.queue_size,
                avail_header: driver.avail_header.clone(),
                avail: driver.avail,
                used_event_index: driver.used_event_index,
            })
        }

        pub fn write_desc(&mut self, index: u16, desc: super::Desc) {
            // The struct invariants on super::Driver guarantee this offset is valid and writable.
            unsafe { self.0.desc.add((index % self.0.queue_size).into()).write_volatile(desc) };
        }

        pub fn write_avail(&mut self, index: u16, val: u16) {
            // The struct invariants on super::Driver guarantee this offset is valid and writable.
            unsafe { self.0.avail.add((index % self.0.queue_size).into()).write_volatile(val) };
        }

        #[allow(unused)]
        pub fn write_flags(&mut self, flags: u16) {
            atomic::fence(atomic::Ordering::Release);
            // Header::flags() is defined to always produce a pointer that may be written.
            unsafe { self.0.avail_header.flags().write_volatile(flags) };
        }

        pub fn write_idx(&mut self, idx: u16) {
            atomic::fence(atomic::Ordering::Release);
            // Header::idx() is defined to always produce a pointer that may be written.
            unsafe { self.0.avail_header.idx().write_volatile(idx) };
        }

        #[allow(unused)]
        pub fn write_used_event_index(&mut self, index: u16) {
            atomic::fence(atomic::Ordering::Release);
            // The struct invariants on super::Driver guarantee this pointer is valid and
            // writable.
            unsafe { self.0.used_event_index.write_volatile(index) };
        }
    }

    pub fn make_desc(addr: u64, len: u32, flags: u16, next: u16) -> super::Desc {
        super::Desc { addr, len, flags, next }
    }

    pub fn deconstruct_used(used: super::Used) -> (u32, u32) {
        (used.id, used.len)
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::fake_queue::{Chain, FakeQueue, IdentityDriverMem};

    #[test]
    fn test_size() {
        let driver_mem = IdentityDriverMem::new();
        // Declare memory for queue size of 3, which is not a power of two.
        let mem = driver_mem.alloc_queue_memory(3).unwrap();
        assert!(Driver::new(mem.desc, mem.avail).is_none());
        assert!(Device::new(mem.used).is_none());
        // Differing, but otherwise valid, sizes for the two rings.
        let mem = driver_mem.alloc_queue_memory(4).unwrap();
        let mem2 = driver_mem.alloc_queue_memory(8).unwrap();
        assert!(Driver::new(mem.desc, mem2.avail).is_none());
        // Declare memory for queues with a queue size of 8, which is good.
        let mem = driver_mem.alloc_queue_memory(8).unwrap();
        assert!(Driver::new(mem.desc, mem.avail).is_some());
        assert!(Device::new(mem.used).is_some());
    }

    #[test]
    fn test_descriptor() {
        let driver_mem = IdentityDriverMem::new();
        let mem = driver_mem.alloc_queue_memory(128).unwrap();
        let driver = Driver::new(mem.desc, mem.avail).unwrap();
        let mut device = Device::new(mem.used).unwrap();
        let mut fake_queue = FakeQueue::new(&driver, &device).unwrap();
        // Check initial state.
        assert!(driver.get_avail(0).is_none());
        // Ask the fake driver to publish a couple of descriptor chains. We know where in the
        // available list they must be placed, but not what descriptor index they will get.
        let (avail0, first_desc0) =
            fake_queue.publish(Chain::with_lengths(&[64, 64], &[], &driver_mem)).unwrap();
        assert_eq!(avail0, 0);
        assert_eq!(driver.get_avail(0), Some(first_desc0));
        let (avail1, first_desc1) =
            fake_queue.publish(Chain::with_lengths(&[32], &[48], &driver_mem)).unwrap();
        assert_eq!(avail1, 1);
        assert_eq!(driver.get_avail(0), Some(first_desc0));
        assert_eq!(driver.get_avail(1), Some(first_desc1));
        // Validate the two chains are what we expect them to be.
        let desc = driver.get_desc(first_desc0).unwrap();
        assert!(desc.has_next());
        assert!(!desc.write_only());
        assert_eq!(desc.data().1, 64);
        let desc = driver.get_desc(desc.next().unwrap()).unwrap();
        assert!(!desc.has_next());
        assert!(!desc.write_only());
        assert_eq!(desc.data().1, 64);

        let desc = driver.get_desc(first_desc1).unwrap();
        assert!(desc.has_next());
        assert!(!desc.write_only());
        assert_eq!(desc.data().1, 32);
        let desc = driver.get_desc(desc.next().unwrap()).unwrap();
        assert!(!desc.has_next());
        assert!(desc.write_only());
        assert_eq!(desc.data().1, 48);
        // Return the chains in reverse order. Claim we wrote 16 bytes to the writable portion.
        device.insert_used(Used::new(first_desc1, 16), 0);
        device.insert_used(Used::new(first_desc0, 0), 1);
        assert!(fake_queue.next_used().is_none());

        // Publish both at once.
        device.publish_used(2);

        // Should now be able to receive the descriptors back.
        let chain = fake_queue.next_used().unwrap();
        assert_eq!(chain.written(), 16);
        let mut iter = chain.data_iter();
        assert_eq!(iter.next().map(|(_, len)| len), Some(16));
        assert!(iter.next().is_none());
        let chain = fake_queue.next_used().unwrap();
        assert_eq!(chain.written(), 0);
        assert!(chain.data_iter().next().is_none());

        // Should be nothing left.
        assert!(fake_queue.next_used().is_none());
    }
}