netdevice_client/session/buffer/
mod.rs

// Copyright 2021 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

//! Fuchsia netdevice buffer management.

pub(super) mod pool;
pub mod sys;

use std::iter;
use std::num::{NonZeroU16, NonZeroU64};
use std::ops::{Deref, DerefMut};
use std::ptr::NonNull;
use std::sync::atomic::{AtomicU8, Ordering};

use fidl_fuchsia_hardware_network as netdev;
use fuchsia_runtime::vmar_root_self;
use zx::sys::ZX_MIN_PAGE_SHIFT;

use static_assertions::{const_assert, const_assert_eq};

use crate::error::{Error, Result};
use crate::session::Port;
use types::{ChainLength, DESCID_NO_NEXT};

pub use pool::{AllocKind, Buffer, Rx, Tx};
/// Network device descriptor version.
pub const NETWORK_DEVICE_DESCRIPTOR_VERSION: u8 = sys::__NETWORK_DEVICE_DESCRIPTOR_VERSION as u8;
pub(super) use types::DescId;
/// Network device descriptor length.
pub(super) const NETWORK_DEVICE_DESCRIPTOR_LENGTH: usize =
    std::mem::size_of::<sys::buffer_descriptor>();

// Ensure that the descriptor length is always a multiple of 8.
const_assert_eq!(NETWORK_DEVICE_DESCRIPTOR_LENGTH % std::mem::size_of::<u64>(), 0);
// Ensure the alignment of a descriptor allows accessing one from a page boundary. As page sizes
// are always powers of 2, we just need to ensure the alignment is a power of 2, and that the
// alignment is not larger than the minimum possible page size.
const_assert!(
    std::mem::align_of::<Descriptor<Tx>>().count_ones() == 1
        && std::mem::align_of::<Descriptor<Tx>>() <= (1 << ZX_MIN_PAGE_SHIFT)
);
const_assert!(
    std::mem::align_of::<Descriptor<Rx>>().count_ones() == 1
        && std::mem::align_of::<Descriptor<Rx>>() <= (1 << ZX_MIN_PAGE_SHIFT)
);

/// A network device descriptor.
#[repr(transparent)]
struct Descriptor<K: AllocKind>(sys::buffer_descriptor, std::marker::PhantomData<K>);

impl<K: AllocKind> Descriptor<K> {
    fn frame_type(&self) -> Result<netdev::FrameType> {
        let Self(this, _marker) = self;
        let prim = this.frame_type;
        netdev::FrameType::from_primitive(prim).ok_or(Error::FrameType(prim))
    }

    fn chain_length(&self) -> Result<ChainLength> {
        let Self(this, _marker) = self;
        ChainLength::try_from(this.chain_length)
    }

    fn nxt(&self) -> Option<u16> {
        let Self(this, _marker) = self;
        if this.nxt == DESCID_NO_NEXT {
            None
        } else {
            Some(this.nxt)
        }
    }

    fn set_nxt(&mut self, desc: Option<DescId<K>>) {
        let Self(this, _marker) = self;
        this.nxt = desc.as_ref().map(DescId::get).unwrap_or(DESCID_NO_NEXT);
    }

    fn offset(&self) -> u64 {
        let Self(this, _marker) = self;
        this.offset
    }

    fn set_offset(&mut self, offset: u64) {
        let Self(this, _marker) = self;
        this.offset = offset;
    }

    fn head_length(&self) -> u16 {
        let Self(this, _marker) = self;
        this.head_length
    }

    fn data_length(&self) -> u32 {
        let Self(this, _marker) = self;
        this.data_length
    }

    fn tail_length(&self) -> u16 {
        let Self(this, _marker) = self;
        this.tail_length
    }

    fn port(&self) -> Port {
        let Self(
            sys::buffer_descriptor {
                port_id: sys::buffer_descriptor_port_id { base, salt }, ..
            },
            _marker,
        ) = self;
        Port { base: *base, salt: *salt }
    }

    fn set_port(&mut self, Port { base, salt }: Port) {
        let Self(sys::buffer_descriptor { port_id, .. }, _marker) = self;
        *port_id = sys::buffer_descriptor_port_id { base, salt };
    }

    /// Initializes a descriptor with the given layout.
    fn initialize(&mut self, chain_len: ChainLength, head_len: u16, data_len: u32, tail_len: u16) {
        let Self(
            sys::buffer_descriptor {
                frame_type,
                chain_length,
                // We shouldn't touch this field as it is always managed by the
                // allocation routines.
                nxt: _,
                info_type,
                port_id: sys::buffer_descriptor_port_id { base, salt },
                // We shouldn't touch this field as it is reserved.
                _reserved: _,
                // We shouldn't touch this field as it is managed by DescRef{Mut}.
                client_opaque_data: _,
                // No need to initialize this because it is already initialized when
                // `Descriptors` was created and it should always stay unchanged.
                offset: _,
                head_length,
                tail_length,
                data_length,
                inbound_flags,
                return_flags,
            },
            _marker,
        ) = self;
        *frame_type = 0;
        *chain_length = chain_len.get();
        *info_type = 0;
        *base = 0;
        *salt = 0;
        *head_length = head_len;
        *tail_length = tail_len;
        *data_length = data_len;
        *inbound_flags = 0;
        *return_flags = 0;
    }
}

impl Descriptor<Rx> {
    fn rx_flags(&self) -> Result<netdev::RxFlags> {
        let Self(this, _marker) = self;
        let bits = this.inbound_flags;
        netdev::RxFlags::from_bits(bits).ok_or(Error::RxFlags(bits))
    }
}

impl Descriptor<Tx> {
    fn set_tx_flags(&mut self, flags: netdev::TxFlags) {
        let Self(this, _marker) = self;
        let bits = flags.bits();
        this.return_flags = bits;
    }

    fn set_frame_type(&mut self, frame_type: netdev::FrameType) {
        let Self(this, _marker) = self;
        this.frame_type = frame_type.into_primitive();
    }

    /// # Panics
    ///
    /// * `used` is larger than the capacity of the buffer.
    /// * `used` is so small that the resulting `tail` for the buffer is not
    ///   representable by a u16.
    fn commit(&mut self, used: u32) {
        let Self(this, _marker) = self;
        // The following addition can't overflow because
        // data_length + tail_length <= buffer_length <= u32::MAX.
        let total = this.data_length + u32::from(this.tail_length);
        let tail = total.checked_sub(used).unwrap();
        this.data_length = used;
        this.tail_length = u16::try_from(tail).unwrap();
    }
}

/// [`Descriptors`] is a slice of [`sys::buffer_descriptor`]s at a mapped address.
///
/// A [`Descriptors`] owns a reference to its backing VMO, ensuring that its
/// refcount will not reach 0 until the `Descriptors` has been dropped.
struct Descriptors {
    ptr: NonNull<sys::buffer_descriptor>,
    count: u16,
}

impl Descriptors {
    /// Creates a new [`Descriptors`].
    ///
    /// Also returns the backing [`zx::Vmo`] and the available [`DescId`]s.
    ///
    /// # Panics
    ///
    /// * `num_tx + num_rx` overflows u16.
    /// * `buffer_stride * total` > u64::MAX.
    fn new(
        num_tx: NonZeroU16,
        num_rx: NonZeroU16,
        buffer_stride: NonZeroU64,
    ) -> Result<(Self, zx::Vmo, Vec<DescId<Tx>>, Vec<DescId<Rx>>)> {
        let total = num_tx.get().checked_add(num_rx.get()).expect("too many descriptors");
        let size = u64::try_from(NETWORK_DEVICE_DESCRIPTOR_LENGTH * usize::from(total))
            .expect("vmo_size overflows u64");
        let vmo = zx::Vmo::create(size).map_err(|status| Error::Vmo("descriptors", status))?;
        // The unwrap is safe because it is guaranteed that the base address
        // returned will be non-zero.
        // https://fuchsia.dev/fuchsia-src/reference/syscalls/vmar_map
        let ptr = NonNull::new(
            vmar_root_self()
                .map(
                    0,
                    &vmo,
                    0,
                    usize::try_from(size).unwrap(),
                    zx::VmarFlags::PERM_WRITE | zx::VmarFlags::PERM_READ,
                )
                .map_err(|status| Error::Map("descriptors", status))?
                as *mut sys::buffer_descriptor,
        )
        .unwrap();

        // Safety: It is required that we don't have two `DescId`s with the same
        // value. Below we create `total` `DescId`s and each of them will have
        // a different value.
        let mut tx =
            (0..num_tx.get()).map(|x| unsafe { DescId::<Tx>::from_raw(x) }).collect::<Vec<_>>();
        let mut rx =
            (num_tx.get()..total).map(|x| unsafe { DescId::<Rx>::from_raw(x) }).collect::<Vec<_>>();
        let descriptors = Self { ptr, count: total };
        fn init_offset<K: AllocKind>(
            descriptors: &Descriptors,
            desc: &mut DescId<K>,
            buffer_stride: NonZeroU64,
        ) {
            let offset = buffer_stride.get().checked_mul(u64::from(desc.get())).unwrap();
            descriptors.borrow_mut(desc).set_offset(offset);
        }
        tx.iter_mut().for_each(|desc| init_offset(&descriptors, desc, buffer_stride));
        rx.iter_mut().for_each(|desc| init_offset(&descriptors, desc, buffer_stride));
        Ok((descriptors, vmo, tx, rx))
    }

    /// Gets an immutable reference to the [`Descriptor`] represented by the [`DescId`].
    ///
    /// See [`ref_count`] and [`DescId`] for details.
    ///
    /// # Panics
    ///
    /// Panics if the descriptor ID is not less than the total number of descriptors.
    fn borrow<'a, 'b: 'a, K: AllocKind>(&'b self, id: &'a DescId<K>) -> DescRef<'a, K> {
        assert!(
            id.get() < self.count,
            "descriptor index out of range: {} >= {}",
            id.get(),
            self.count
        );
        unsafe { DescRef::new(self.ptr.as_ptr().add(id.get().into())) }
    }

    /// Gets a mutable reference to the [`Descriptor`] represented by the [`DescId`].
    ///
    /// See [`ref_count`] and [`DescId`] for details.
    ///
    /// # Panics
    ///
    /// Panics if the descriptor ID is not less than the total number of descriptors.
    fn borrow_mut<'a, 'b: 'a, K: AllocKind>(&'b self, id: &'a mut DescId<K>) -> DescRefMut<'a, K> {
        assert!(
            id.get() < self.count,
            "descriptor index out of range: {} >= {}",
            id.get(),
            self.count
        );
        unsafe { DescRefMut::new(self.ptr.as_ptr().add(id.get().into())) }
    }

    /// Chains the descriptors returned by the device.
    ///
    /// The iteration continues as long as `chain_length` is not 0; it yields an
    /// error and stops if `chain_length` is invalid (larger than
    /// [`netdev::MAX_DESCRIPTOR_CHAIN`]).
    fn chain<K: AllocKind>(&self, head: DescId<K>) -> impl Iterator<Item = Result<DescId<K>>> + '_ {
        iter::successors(
            Some(Ok(head)),
            move |curr: &Result<DescId<K>>| -> Option<Result<DescId<K>>> {
                match curr {
                    Err(_err) => None,
                    Ok(curr) => {
                        let descriptor = self.borrow(curr);
                        match descriptor.chain_length() {
                            Err(e) => Some(Err(e)),
                            Ok(len) => {
                                if len == ChainLength::ZERO {
                                    None
                                } else {
                                    // Safety: a non-zero chain length means we can read
                                    // the nxt field, and we trust the device to give us a
                                    // valid descriptor, which should not have the same
                                    // value as any descriptor that we own.
                                    descriptor.nxt().map(|id| Ok(unsafe { DescId::from_raw(id) }))
                                }
                            }
                        }
                    }
                }
            },
        )
    }
}

// Descriptors is safe to be sent among threads.
unsafe impl Send for Descriptors {}
// Descriptors is also Sync because the refcount is backed by an atomic integer.
unsafe impl Sync for Descriptors {}

impl Drop for Descriptors {
    fn drop(&mut self) {
        // A descriptor is small and count is at most 512 for now, so this
        // can't overflow even on a 16-bit platform.
        let len = NETWORK_DEVICE_DESCRIPTOR_LENGTH * usize::from(self.count);
        let page_size = usize::try_from(zx::system_get_page_size()).unwrap();
        let aligned = (len + page_size - 1) / page_size * page_size;
        unsafe {
            vmar_root_self()
                .unmap(self.ptr.as_ptr() as usize, aligned)
                .expect("failed to unmap VMO")
        }
    }
}

/// Gets the reference count of the [`Descriptor`].
///
/// # Safety
///
/// `ptr` must be valid for the entire lifetime `'a`; no one else should ever
/// access the `client_opaque_data` field in this crate.
///
/// Note that the reference counting wouldn't be necessary if all [`DescId`]s
/// were different, because the rust borrow checker could then guarantee that
/// we can't have two conflicting references to the same descriptor at the same
/// time. However, that proof would be hard, and it is impossible to express
/// within rust; we also have to trust the driver to hand us sensible
/// [`DescId`]s. The dynamic reference counting is an extra layer of checking
/// to catch program errors.
///
/// The reference count is stored in an [`AtomicU8`]. u8 should be enough
/// because [`DescRef`] is not exported and we won't reborrow a descriptor
/// repeatedly in this crate. In fact, there should be at most one reference to
/// a descriptor at a time for most of this crate, but we do the counting
/// anyway to respect the fact that multiple shared references can be created,
/// and to avoid surprises. The counter is incremented by 1 every time a shared
/// reference is created and u8::MAX is stored for an exclusive reference. When
/// creating a shared reference we check that the counter is smaller than
/// u8::MAX-1; when creating an exclusive reference we check that the counter
/// is 0.
unsafe fn ref_count<'a>(ptr: *const sys::buffer_descriptor) -> &'a AtomicU8 {
    // Safety: No one else in this crate can access ref_cnt. The intermediate
    // &u8 reference we create is correctly aligned (alignment = 1) and
    // dereferenceable (given ptr is a valid pointer).
    const_assert_eq!(std::mem::align_of::<AtomicU8>(), std::mem::align_of::<u8>());
    &*(&((*ptr).client_opaque_data[0]) as *const u8 as *const AtomicU8)
}

/// This value signals that there are currently no references to the descriptor.
const DESC_REF_UNUSED: u8 = 0;
/// This value signals that the descriptor is exclusively borrowed. Anything
/// between [`DESC_REF_UNUSED`] and [`DESC_REF_EXCLUSIVE`] means there are
/// shared references to the descriptor.
const DESC_REF_EXCLUSIVE: u8 = u8::MAX;

/// A shared reference to a [`Descriptor`].
struct DescRef<'a, K: AllocKind> {
    ptr: &'a Descriptor<K>,
}

impl<K: AllocKind> DescRef<'_, K> {
    /// Creates a new shared reference.
    ///
    /// # Safety
    ///
    /// The caller must make sure the pointer is correctly aligned and points to
    /// valid memory. The underlying memory pointed to by ptr must not be freed
    /// within the lifetime. The caller must also make sure there is no active
    /// exclusive borrow during the lifetime of the returned [`DescRef`].
    ///
    /// # Panics
    ///
    /// Panics if there are too many shared references already (254 max) or
    /// there's an exclusive reference.
    unsafe fn new(ptr: *const sys::buffer_descriptor) -> Self {
        let ref_cnt = ref_count(ptr);
        let prev = ref_cnt.fetch_add(1, Ordering::AcqRel);
        if prev == DESC_REF_EXCLUSIVE {
            panic!("trying to create a shared reference when there is already a mutable reference");
        }
        if prev + 1 == DESC_REF_EXCLUSIVE {
            panic!("there are too many shared references")
        }
        Self { ptr: &*(ptr as *const Descriptor<K>) }
    }
}

impl<K: AllocKind> Drop for DescRef<'_, K> {
    fn drop(&mut self) {
        let ref_cnt = unsafe { ref_count(&self.ptr.0 as *const _) };
        let prev = ref_cnt.fetch_sub(1, Ordering::AcqRel);
        assert!(prev != DESC_REF_EXCLUSIVE && prev != DESC_REF_UNUSED);
    }
}

impl<K: AllocKind> Deref for DescRef<'_, K> {
    type Target = Descriptor<K>;

    fn deref(&self) -> &Self::Target {
        self.ptr
    }
}

/// An exclusive reference to the descriptor.
struct DescRefMut<'a, K: AllocKind> {
    ptr: &'a mut Descriptor<K>,
}

impl<K: AllocKind> DescRefMut<'_, K> {
    /// Creates a new exclusive reference.
    ///
    /// # Safety
    ///
    /// The caller must make sure the pointer is correctly aligned and points to
    /// valid memory. The underlying memory pointed to by ptr must not be freed
    /// within the lifetime. The caller must also make sure there are no other
    /// active borrows during the lifetime of the returned [`DescRefMut`].
    ///
    /// # Panics
    ///
    /// Panics if the descriptor is borrowed.
    unsafe fn new(ptr: *mut sys::buffer_descriptor) -> Self {
        let ref_cnt = ref_count(ptr);
        if let Err(prev) = ref_cnt.compare_exchange(
            DESC_REF_UNUSED,
            DESC_REF_EXCLUSIVE,
            Ordering::AcqRel,
            Ordering::Acquire,
        ) {
            panic!(
                "trying to create an exclusive reference when there are other references: {}",
                prev
            );
        }
        Self { ptr: &mut *(ptr as *mut Descriptor<K>) }
    }
}

impl<K: AllocKind> Drop for DescRefMut<'_, K> {
    fn drop(&mut self) {
        let ref_cnt = unsafe { ref_count(&self.ptr.0 as *const _) };
        if let Err(prev) = ref_cnt.compare_exchange(
            DESC_REF_EXCLUSIVE,
            DESC_REF_UNUSED,
            Ordering::AcqRel,
            Ordering::Acquire,
        ) {
            panic!(
                "we have a mutable reference while the descriptor is not exclusively borrowed: {}",
                prev
            );
        }
    }
}

impl<K: AllocKind> Deref for DescRefMut<'_, K> {
    type Target = Descriptor<K>;

    fn deref(&self) -> &Self::Target {
        self.ptr
    }
}

impl<K: AllocKind> DerefMut for DescRefMut<'_, K> {
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut *self.ptr
    }
}

/// A module to encapsulate the witness types so that users cannot create them
/// with struct literal syntax.
mod types {
    use super::{netdev, AllocKind, Error, Result};
    use std::fmt::Debug;
    use std::num::TryFromIntError;
    use zerocopy::{FromBytes, Immutable, IntoBytes, KnownLayout};

    /// The identifier of a descriptor.
    ///
    /// It is considered the owner of the underlying [`Descriptor`]. No two
    /// [`DescId`]s with the same value should co-exist at any point of the
    /// program execution, just as in normal rust an object must not have two
    /// owners at the same time. rustc can't check this for us, so creating a
    /// [`DescId`] is unsafe: it is the programmer's job to make sure
    /// [`DescId`]s don't alias, i.e., that they all have different values.
    ///
    /// Also, since [`DESCID_NO_NEXT`] (`u16::MAX`) is used to signal the end
    /// of a free list, there should be no [`DescId`] holding that value.
    #[derive(PartialEq, Eq, KnownLayout, FromBytes, IntoBytes, Immutable)]
    #[repr(transparent)]
    pub(in crate::session) struct DescId<K: AllocKind>(u16, std::marker::PhantomData<K>);

    impl<K: AllocKind> Debug for DescId<K> {
        fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
            let Self(id, _marker) = self;
            f.debug_tuple(K::REFL.as_str()).field(id).finish()
        }
    }

    /// This value signals the end of the free list. We might be able to get
    /// rid of this, but given the current restrictions of zircon fifos it's
    /// not a big deal to sacrifice one value in the value space; we can't have
    /// that many descriptors anyway.
    pub(super) const DESCID_NO_NEXT: u16 = u16::MAX;

    impl<K: AllocKind> DescId<K> {
        // Safety: The caller needs to make sure there are no other `DescId`s
        // with the same value as `id` at the same time.
        pub(super) unsafe fn from_raw(id: u16) -> Self {
            assert_ne!(id, DESCID_NO_NEXT);
            Self(id, std::marker::PhantomData)
        }

        pub(super) fn get(&self) -> u16 {
            let Self(id, _marker) = self;
            *id
        }
    }

    /// A witness type attesting that the wrapped length is at most `MAX_DESCRIPTOR_CHAIN`.
    #[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd)]
    pub(in crate::session) struct ChainLength(u8);

    impl TryFrom<u8> for ChainLength {
        type Error = Error;

        fn try_from(value: u8) -> Result<Self> {
            if value > netdev::MAX_DESCRIPTOR_CHAIN {
                return Err(Error::LargeChain(value.into()));
            }
            Ok(ChainLength(value))
        }
    }

    impl TryFrom<usize> for ChainLength {
        type Error = Error;

        fn try_from(value: usize) -> Result<Self> {
            let value =
                u8::try_from(value).map_err(|TryFromIntError { .. }| Error::LargeChain(value))?;
            value.try_into()
        }
    }

    impl From<ChainLength> for usize {
        fn from(ChainLength(len): ChainLength) -> Self {
            len.into()
        }
    }

    impl ChainLength {
        pub(super) const ZERO: Self = Self(0);

        pub(super) fn get(&self) -> u8 {
            let ChainLength(len) = self;
            *len
        }
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use assert_matches::assert_matches;

    // These `unwrap`s can't panic because none of the values are zero.
    const TX_BUFFERS: NonZeroU16 = NonZeroU16::new(1).unwrap();
    const RX_BUFFERS: NonZeroU16 = NonZeroU16::new(2).unwrap();
    const BUFFER_STRIDE: NonZeroU64 = NonZeroU64::new(4).unwrap();
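
    // A minimal usage sketch, not part of the original test suite: `set_port`
    // followed by `port` should round-trip the same base/salt pair. The values
    // 7 and 3 are arbitrary.
    #[test]
    fn port_round_trip() {
        let (descriptors, _vmo, mut tx, _rx) =
            Descriptors::new(TX_BUFFERS, RX_BUFFERS, BUFFER_STRIDE).expect("create descriptors");
        descriptors.borrow_mut(&mut tx[0]).set_port(Port { base: 7, salt: 3 });
        let Port { base, salt } = descriptors.borrow(&tx[0]).port();
        assert_eq!(base, 7);
        assert_eq!(salt, 3);
    }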

    #[test]
    fn get_descriptor_after_vmo_write() {
        let (descriptors, vmo, tx, rx) =
            Descriptors::new(TX_BUFFERS, RX_BUFFERS, BUFFER_STRIDE).expect("create descriptors");
        vmo.write(&[netdev::FrameType::Ethernet.into_primitive()][..], 0).expect("vmo write");
        assert_eq!(tx.len(), TX_BUFFERS.get().into());
        assert_eq!(rx.len(), RX_BUFFERS.get().into());
        assert_eq!(
            descriptors.borrow(&tx[0]).frame_type().expect("failed to get frame type"),
            netdev::FrameType::Ethernet
        );
    }
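
    // An illustrative sketch, not part of the original test suite: the opaque
    // refcount in `client_opaque_data` permits multiple concurrent shared
    // borrows of the same descriptor, and both must observe the same data.
    #[test]
    fn multiple_shared_borrows() {
        let (descriptors, _vmo, tx, _rx) =
            Descriptors::new(TX_BUFFERS, RX_BUFFERS, BUFFER_STRIDE).expect("create descriptors");
        let first = descriptors.borrow(&tx[0]);
        let second = descriptors.borrow(&tx[0]);
        // Both references read through the same underlying descriptor.
        assert_eq!(first.offset(), second.offset());
    }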

    #[test]
    fn init_descriptor() {
        const HEAD_LEN: u16 = 1;
        const DATA_LEN: u32 = 2;
        const TAIL_LEN: u16 = 3;
        let (descriptors, _vmo, mut tx, _rx) =
            Descriptors::new(TX_BUFFERS, RX_BUFFERS, BUFFER_STRIDE).expect("create descriptors");
        {
            let mut descriptor = descriptors.borrow_mut(&mut tx[0]);
            descriptor.initialize(ChainLength::ZERO, HEAD_LEN, DATA_LEN, TAIL_LEN);
        }

        let got = descriptors.borrow(&tx[0]);
        assert_eq!(got.chain_length().unwrap(), ChainLength::ZERO);
        assert_eq!(got.offset(), 0);
        assert_eq!(got.head_length(), HEAD_LEN);
        assert_eq!(got.data_length(), DATA_LEN);
        assert_eq!(got.tail_length(), TAIL_LEN);
    }
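
    // A minimal sketch of `commit`'s arithmetic, not part of the original test
    // suite: committing `used` bytes moves the unused remainder of the data
    // region into the tail. With data_length = 10 and tail_length = 5 the
    // total is 15, so committing 8 bytes leaves a tail of 7.
    #[test]
    fn commit_moves_unused_data_into_tail() {
        let (descriptors, _vmo, mut tx, _rx) =
            Descriptors::new(TX_BUFFERS, RX_BUFFERS, BUFFER_STRIDE).expect("create descriptors");
        let mut descriptor = descriptors.borrow_mut(&mut tx[0]);
        descriptor.initialize(ChainLength::ZERO, 0, 10, 5);
        descriptor.commit(8);
        assert_eq!(descriptor.data_length(), 8);
        assert_eq!(descriptor.tail_length(), 7);
    }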

    #[test]
    fn chain_length() {
        for raw in 0..=netdev::MAX_DESCRIPTOR_CHAIN {
            let got = ChainLength::try_from(raw)
                .expect("the conversion should succeed with length <= MAX_DESCRIPTOR_CHAIN");
            assert_eq!(got.get(), raw);
        }

        for raw in netdev::MAX_DESCRIPTOR_CHAIN + 1..u8::MAX {
            assert_matches!(
                ChainLength::try_from(raw)
                    .expect_err("the conversion should fail with length > MAX_DESCRIPTOR_CHAIN"),
                Error::LargeChain(len) if len == raw.into()
            );
        }
    }
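
    // An illustrative sketch, not part of the original test suite: build a
    // two-descriptor chain by hand and check that `chain` walks it in order.
    // The head gets chain_length = 1 and points at the second descriptor; the
    // second descriptor keeps the zero-initialized (VMO-backed)
    // chain_length = 0, which terminates the iteration.
    #[test]
    fn chain_walks_descriptors_in_order() {
        // Two tx descriptors are needed here; this constant is local to the sketch.
        let num_tx = NonZeroU16::new(2).unwrap();
        let (descriptors, _vmo, mut tx, _rx) =
            Descriptors::new(num_tx, RX_BUFFERS, BUFFER_STRIDE).expect("create descriptors");
        let second = tx.pop().unwrap();
        let mut head = tx.pop().unwrap();
        {
            let mut desc = descriptors.borrow_mut(&mut head);
            desc.initialize(ChainLength::try_from(1u8).expect("valid chain length"), 0, 0, 0);
            desc.set_nxt(Some(second));
        }
        let chain = descriptors
            .chain(head)
            .collect::<Result<Vec<_>>>()
            .expect("all chain lengths are valid");
        assert_eq!(chain.iter().map(DescId::get).collect::<Vec<_>>(), vec![0, 1]);
    }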
}