packet/fragmented.rs

// Copyright 2019 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

use core::ops::{Range, RangeBounds};

use zerocopy::{SplitByteSlice, SplitByteSliceMut};

use crate::canonicalize_range;

/// A wrapper for a sequence of byte slices.
///
/// `FragmentedByteSlice` shares its underlying memory with the slice it was
/// constructed from and, as a result, operations on a `FragmentedByteSlice` may
/// mutate the backing slice.
#[derive(Debug, Eq, PartialEq)]
pub struct FragmentedByteSlice<'a, B: SplitByteSlice>(&'a mut [B]);

/// A single byte slice fragment in a [`FragmentedByteSlice`].
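/// # Example
///
/// A minimal sketch of taking bytes off a `&[u8]` fragment; the `use` path
/// assumes this module is exported as `packet::fragmented`:
///
/// ```
/// use packet::fragmented::Fragment; // assumed export path
///
/// let data = [1u8, 2, 3, 4, 5];
/// let mut frag: &[u8] = &data[..];
/// // Take two bytes off the front and one off the back; the fragment
/// // shrinks accordingly.
/// assert_eq!(frag.take_front(2), Some(&data[..2]));
/// assert_eq!(frag.take_back(1), Some(&data[4..]));
/// assert_eq!(frag, &data[2..4]);
/// ```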
pub trait Fragment: SplitByteSlice {
    /// Takes `n` bytes from the front of this fragment.
    ///
    /// After a call to `take_front(n)`, the fragment is `n` bytes shorter.
    ///
    /// Returns `None` if `n` is larger than the length of this `ByteSlice`.
    fn take_front(&mut self, n: usize) -> Option<Self>;

    /// Takes `n` bytes from the back of this fragment.
    ///
    /// After a call to `take_back(n)`, the fragment is `n` bytes shorter.
    ///
    /// Returns `None` if `n` is larger than the length of this `ByteSlice`.
    fn take_back(&mut self, n: usize) -> Option<Self>;

    /// Constructs a new empty `Fragment`.
    fn empty() -> Self;
}

/// A type that can produce a `FragmentedByteSlice` view of itself.
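/// # Example
///
/// A minimal sketch of viewing an array of byte slices as one fragmented
/// buffer; the `use` path assumes this module is exported as
/// `packet::fragmented`:
///
/// ```
/// use packet::fragmented::AsFragmentedByteSlice; // assumed export path
///
/// let data = [1u8, 2, 3, 4, 5];
/// let (a, b) = data.split_at(2);
/// // Any `AsMut<[B]>` over fragments, such as an array of byte slices, can
/// // be viewed as a `FragmentedByteSlice` without copying.
/// let mut frags = [a, b];
/// let bytes = frags.as_fragmented_byte_slice();
/// assert_eq!(bytes.len(), 5);
/// assert!(bytes.eq_slice(&[1, 2, 3, 4, 5]));
/// ```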
pub trait AsFragmentedByteSlice<B: Fragment> {
    /// Generates a `FragmentedByteSlice` view of `self`.
    fn as_fragmented_byte_slice(&mut self) -> FragmentedByteSlice<'_, B>;
}

impl<O, B> AsFragmentedByteSlice<B> for O
where
    B: Fragment,
    O: AsMut<[B]>,
{
    fn as_fragmented_byte_slice(&mut self) -> FragmentedByteSlice<'_, B> {
        FragmentedByteSlice::new(self.as_mut())
    }
}

impl<'a> Fragment for &'a [u8] {
    fn take_front(&mut self, n: usize) -> Option<Self> {
        self.split_off(..n)
    }

    fn take_back(&mut self, n: usize) -> Option<Self> {
        let split = <[u8]>::len(self).checked_sub(n)?;
        Some(self.split_off(split..).unwrap())
    }

    fn empty() -> Self {
        &[]
    }
}

impl<'a> Fragment for &'a mut [u8] {
    fn take_front(&mut self, n: usize) -> Option<Self> {
        self.split_off_mut(..n)
    }

    fn take_back(&mut self, n: usize) -> Option<Self> {
        let split = <[u8]>::len(self).checked_sub(n)?;
        Some(self.split_off_mut(split..).unwrap())
    }

    fn empty() -> Self {
        &mut []
    }
}

impl<'a, B: 'a + Fragment> FragmentedByteSlice<'a, B> {
    /// Constructs a new `FragmentedByteSlice` from `bytes`.
    ///
    /// It is important to note that `FragmentedByteSlice` takes a mutable
    /// reference to a backing slice. Operations on the `FragmentedByteSlice`
    /// may mutate `bytes` as an optimization to avoid extra allocations.
    ///
    /// Users are encouraged to treat slices used to construct
    /// `FragmentedByteSlice`s as if they are not owned anymore and only serve
    /// as (usually temporary) backing for a `FragmentedByteSlice`.
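    /// # Example
    ///
    /// A minimal sketch of how operations can shrink the fragments stored in
    /// the backing slice itself; the `use` path assumes this module is
    /// exported as `packet::fragmented`:
    ///
    /// ```
    /// use packet::fragmented::FragmentedByteSlice; // assumed export path
    ///
    /// let data = [1u8, 2, 3, 4, 5];
    /// let mut backing = [&data[..2], &data[2..]];
    /// let bytes = FragmentedByteSlice::new(&mut backing[..]);
    /// let bytes = bytes.slice(1..);
    /// assert_eq!(bytes.len(), 4);
    /// // The first fragment in `backing` was shrunk in place.
    /// assert_eq!(backing[0], &data[1..2]);
    /// ```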
    pub fn new(bytes: &'a mut [B]) -> Self {
        Self(bytes)
    }

    /// Constructs a new empty `FragmentedByteSlice`.
    pub fn new_empty() -> Self {
        Self(&mut [])
    }

    /// Gets the total length, in bytes, of this `FragmentedByteSlice`.
    pub fn len(&self) -> usize {
        // TODO(brunodalbo) explore if caching the total length in a
        // FragmentedByteSlice could be a worthy performance optimization.
        self.0.iter().map(|x| x.len()).sum()
    }

    /// Returns `true` if the `FragmentedByteSlice` is empty.
    pub fn is_empty(&self) -> bool {
        self.len() == 0
    }

    /// Slices this `FragmentedByteSlice`, reducing it to only the bytes within
    /// `range`.
    ///
    /// `slice` will mutate the backing slice by dropping or shrinking fragments
    /// as necessary so the overall composition matches the requested `range`.
    /// The returned `FragmentedByteSlice` uses the same (albeit possibly
    /// modified) backing mutable slice reference as `self`.
    ///
    /// # Panics
    ///
    /// Panics if the provided `range` is not within the bounds of this
    /// `FragmentedByteSlice`, or if the range is nonsensical (the end precedes
    /// the start).
    pub fn slice<R>(self, range: R) -> Self
    where
        R: RangeBounds<usize>,
    {
        let len = self.len();
        let range = canonicalize_range(len, &range);
        let mut bytes = self.0;
        // c is the number of bytes we need to discard from the beginning of
        // the fragments.
        let mut c = range.start;
        while c != 0 {
            let first = &mut bytes[0];
            if first.len() > c {
                // if the first fragment contains more than c bytes, just take
                // c bytes out of its front and we're done.
                let _: B = first.take_front(c).unwrap();
                break;
            } else {
                // otherwise, just account for the first fragment's entire
                // length and drop it.
                c -= first.len();
                bytes = &mut bytes[1..];
            }
        }
        // c is the number of bytes we need to discard from the end of the
        // fragments.
        let mut c = len - range.end;
        while c != 0 {
            let idx = bytes.len() - 1;
            let last = &mut bytes[idx];
            if last.len() > c {
                // if the last fragment contains more than c bytes, just take
                // c bytes out of its back and we're done.
                let _: B = last.take_back(c).unwrap();
                break;
            } else {
                // otherwise, just account for the last fragment's entire length
                // and drop it.
                c -= last.len();
                bytes = &mut bytes[..idx];
            }
        }
        Self(bytes)
    }

    /// Checks whether the contents of this `FragmentedByteSlice` are equal to
    /// the contents of `other`.
    pub fn eq_slice(&self, mut other: &[u8]) -> bool {
        for x in self.0.iter() {
            let x = x.as_ref();
            if other.len() < x.len() || !x.eq(&other[..x.len()]) {
                return false;
            }
            other = &other[x.len()..];
        }
        other.is_empty()
    }

    /// Iterates over all the bytes in this `FragmentedByteSlice`.
    pub fn iter(&self) -> impl '_ + Iterator<Item = u8> {
        self.0.iter().map(|x| x.iter()).flatten().copied()
    }

    /// Iterates over the fragments of this `FragmentedByteSlice`.
    pub fn iter_fragments(&'a self) -> impl 'a + Iterator<Item = &'a [u8]> + Clone {
        self.0.iter().map(|x| x.as_ref())
    }

    /// Copies all the bytes in `self` into the contiguous slice `dst`.
    ///
    /// # Panics
    ///
    /// Panics if `dst.len() != self.len()`.
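    /// # Example
    ///
    /// A minimal sketch of flattening into a contiguous buffer; the `use`
    /// path assumes this module is exported as `packet::fragmented`:
    ///
    /// ```
    /// use packet::fragmented::AsFragmentedByteSlice; // assumed export path
    ///
    /// let data = [1u8, 2, 3, 4, 5];
    /// let (a, b) = data.split_at(2);
    /// let mut frags = [a, b];
    /// let bytes = frags.as_fragmented_byte_slice();
    /// // The destination must be exactly `bytes.len()` bytes long.
    /// let mut dst = [0u8; 5];
    /// bytes.copy_into_slice(&mut dst[..]);
    /// assert_eq!(dst, data);
    /// ```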
    pub fn copy_into_slice(&self, mut dst: &mut [u8]) {
        for p in self.0.iter() {
            let (tgt, nxt) = dst.split_at_mut(p.len());
            tgt.copy_from_slice(p.as_ref());
            dst = nxt;
        }
        assert_eq!(dst.len(), 0);
    }

    /// Returns a flattened version of this buffer, copying its contents into a
    /// [`Vec`].
    pub fn to_flattened_vec(&self) -> Vec<u8> {
        let mut out = Vec::with_capacity(self.len());
        for x in self.0.iter() {
            out.extend_from_slice(x);
        }
        out
    }

    /// Creates an index tuple from a linear index `idx`.
    ///
    /// `get_index` returns a tuple `(slice, byte)` identifying the `idx`th
    /// byte in this `FragmentedByteSlice`: `slice` is the index into the
    /// backing slice of fragments, and `byte` is the byte index within the
    /// fragment at `self.0[slice]`.
    ///
    /// # Panics
    ///
    /// Panics if `idx` is out of bounds.
    fn get_index(&self, mut idx: usize) -> (usize, usize) {
        let mut a = 0;
        while self.0[a].len() <= idx || self.0[a].len() == 0 {
            idx -= self.0[a].len();
            a += 1;
        }
        (a, idx)
    }

    /// Increments the index tuple `idx`.
    ///
    /// Increments the index tuple `idx` (see
    /// [`FragmentedByteSlice::get_index`]) so it references the next byte.
    /// `increment_index` will stop incrementing and just return a slice index
    /// equal to the length of the backing slice if `idx` can't be incremented
    /// anymore.
    fn increment_index(&self, idx: &mut (usize, usize)) {
        if self.0[idx.0].len() > (idx.1 + 1) {
            idx.1 += 1;
        } else {
            idx.0 += 1;
            // skip any empty slices:
            while idx.0 < self.0.len() && self.0[idx.0].len() == 0 {
                idx.0 += 1;
            }
            idx.1 = 0;
        }
    }

    /// Decrements the index tuple `idx`.
    ///
    /// Decrements the index tuple `idx` (see
    /// [`FragmentedByteSlice::get_index`]) so it references the previous byte.
    /// `decrement_index` will wrap around to an invalid out of bounds index
    /// (slice index is equal to the length of the backing slice) if `idx` is
    /// pointing to the `0`th byte.
    fn decrement_index(&self, idx: &mut (usize, usize)) {
        if idx.1 == 0 {
            if idx.0 == 0 {
                idx.0 = self.0.len();
                idx.1 = 0;
                return;
            }
            idx.0 -= 1;
            // skip any empty slices:
            while idx.0 != 0 && self.0[idx.0].len() == 0 {
                idx.0 -= 1;
            }
            if self.0[idx.0].len() != 0 {
                idx.1 = self.0[idx.0].len() - 1;
            } else {
                idx.0 = self.0.len();
                idx.1 = 0;
            }
        } else {
            idx.1 -= 1;
        }
    }

    /// Tries to convert this `FragmentedByteSlice` into a contiguous one.
    ///
    /// Returns `Ok` if `self`'s backing storage contains 0 or 1 byte slices,
    /// and `Err` otherwise.
    ///
    /// If `self`'s backing storage contains 1 byte slice, that byte slice will
    /// be replaced with an empty byte slice, and the original used to construct
    /// the return value.
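    /// # Example
    ///
    /// A minimal sketch; the `use` path assumes this module is exported as
    /// `packet::fragmented`:
    ///
    /// ```
    /// use packet::fragmented::AsFragmentedByteSlice; // assumed export path
    ///
    /// let data = [1u8, 2, 3];
    /// // A view backed by a single fragment can be unwrapped into it.
    /// let mut frags = [&data[..]];
    /// let bytes = frags.as_fragmented_byte_slice();
    /// assert_eq!(bytes.try_into_contiguous().unwrap(), &data[..]);
    ///
    /// // A view backed by more than one fragment cannot.
    /// let (a, b) = data.split_at(1);
    /// let mut frags = [a, b];
    /// assert!(frags.as_fragmented_byte_slice().try_into_contiguous().is_err());
    /// ```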
    pub fn try_into_contiguous(self) -> Result<B, Self> {
        if self.0.is_empty() {
            Ok(B::empty())
        } else if self.0.len() == 1 {
            Ok(std::mem::replace(&mut self.0[0], B::empty()))
        } else {
            Err(self)
        }
    }

    /// Tries to get a contiguous reference to this `FragmentedByteSlice`.
    ///
    /// Returns `Some` if `self`'s backing storage contains 0 or 1 byte slices,
    /// and `None` otherwise.
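    /// # Example
    ///
    /// A minimal sketch; the `use` path assumes this module is exported as
    /// `packet::fragmented`:
    ///
    /// ```
    /// use packet::fragmented::AsFragmentedByteSlice; // assumed export path
    ///
    /// let data = [1u8, 2, 3];
    /// let mut frags = [&data[..]];
    /// let bytes = frags.as_fragmented_byte_slice();
    /// assert_eq!(bytes.try_get_contiguous(), Some(&data[..]));
    /// ```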
    pub fn try_get_contiguous(&self) -> Option<&[u8]> {
        match &self.0 {
            [] => Some(&[]),
            [slc] => Some(slc),
            _ => None,
        }
    }

    /// Tries to split this `FragmentedByteSlice` into a contiguous prefix, a
    /// (possibly fragmented) body, and a contiguous suffix.
    ///
    /// Returns `None` if it isn't possible to form a contiguous prefix and
    /// suffix with the provided `range`.
    ///
    /// # Panics
    ///
    /// Panics if the range is out of bounds, or if the range is nonsensical
    /// (the end precedes the start).
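    /// # Example
    ///
    /// A minimal sketch of splitting off a contiguous prefix and suffix; the
    /// `use` path assumes this module is exported as `packet::fragmented`:
    ///
    /// ```
    /// use packet::fragmented::AsFragmentedByteSlice; // assumed export path
    ///
    /// let data = [1u8, 2, 3, 4, 5, 6];
    /// let (a, b) = data.split_at(3);
    /// let mut frags = [a, b];
    /// let frag = frags.as_fragmented_byte_slice();
    /// // Split off a 2-byte contiguous prefix and a 2-byte contiguous suffix,
    /// // leaving bytes 2..4 as the (possibly still fragmented) body.
    /// let (head, body, foot) = frag.try_split_contiguous(2..4).unwrap();
    /// assert_eq!(head, &data[..2]);
    /// assert_eq!(&body.to_flattened_vec()[..], &data[2..4]);
    /// assert_eq!(foot, &data[4..]);
    /// ```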
    pub fn try_split_contiguous<R>(self, range: R) -> Option<(B, Self, B)>
    where
        R: RangeBounds<usize>,
    {
        let len = self.len();
        let range = canonicalize_range(len, &range);
        if len == 0 && range.start == 0 && range.end == 0 {
            // If the total length is zero and the requested body range is the
            // empty range starting at zero, return early so the call to
            // last_mut() below doesn't make us return None.
            return Some((B::empty(), FragmentedByteSlice(&mut []), B::empty()));
        }

        // Take the foot first: if there is only a single fragment, taking the
        // head first would throw off the index calculations for the foot.

        let foot = self.0.last_mut()?;
        let take = len - range.end;
        if foot.len() < take {
            return None;
        }
        let foot = foot.take_back(take).unwrap();

        let head = self.0.first_mut()?;
        if head.len() < range.start {
            return None;
        }
        let head = head.take_front(range.start).unwrap();

        Some((head, self, foot))
    }
}

impl<'a, B: 'a + SplitByteSliceMut + Fragment> FragmentedByteSlice<'a, B> {
    /// Iterates over mutable references to all the bytes in this
    /// `FragmentedByteSlice`.
    pub fn iter_mut(&mut self) -> impl '_ + Iterator<Item = &'_ mut u8> {
        self.0.iter_mut().map(|x| x.iter_mut()).flatten()
    }

    /// Copies all the bytes in `src` to `self`.
    ///
    /// # Panics
    ///
    /// Panics if `self.len() != src.len()`.
    pub fn copy_from_slice(&mut self, mut src: &[u8]) {
        for p in self.0.iter_mut() {
            let (cur, nxt) = src.split_at(p.len());
            p.as_mut().copy_from_slice(cur);
            src = nxt;
        }
        assert_eq!(src.len(), 0);
    }

    /// Copies all the bytes from another `FragmentedByteSlice` `other` into
    /// `self`.
    ///
    /// # Panics
    ///
    /// Panics if `self.len() != other.len()`.
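    /// # Example
    ///
    /// A minimal sketch of copying between two differently fragmented views;
    /// the `use` path assumes this module is exported as `packet::fragmented`:
    ///
    /// ```
    /// use packet::fragmented::AsFragmentedByteSlice; // assumed export path
    ///
    /// let src_data = [1u8, 2, 3, 4];
    /// let (a, b) = src_data.split_at(1);
    /// let mut src_frags = [a, b];
    /// let src = src_frags.as_fragmented_byte_slice();
    ///
    /// let mut dst_data = [0u8; 4];
    /// let (c, d) = dst_data.split_at_mut(3);
    /// let mut dst_frags = [c, d];
    /// let mut dst = dst_frags.as_fragmented_byte_slice();
    ///
    /// // The fragment boundaries don't need to line up; only the total
    /// // lengths must match.
    /// dst.copy_from(&src);
    /// assert_eq!(&dst.to_flattened_vec()[..], &[1, 2, 3, 4]);
    /// ```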
    pub fn copy_from<BB>(&mut self, other: &FragmentedByteSlice<'_, BB>)
    where
        BB: SplitByteSlice,
    {
        // keep an iterator over the fragments in other.
        let mut oth = other.0.iter().map(|z| z.as_ref());
        // op is the current fragment in other we're copying from.
        let mut op = oth.next();
        for part in self.0.iter_mut() {
            // p is the current fragment in self we're feeding bytes into.
            let mut p = part.as_mut();
            // iterate until this fragment is all consumed.
            while !p.is_empty() {
                // skip any empty slices in other.
                while op.unwrap().is_empty() {
                    op = oth.next();
                }
                // get the current fragment in other.
                let k = op.unwrap();
                if k.len() <= p.len() {
                    // if k doesn't have more bytes than p needs, copy all of k,
                    // advance p past the copied region, and then fetch the next
                    // fragment from other.
                    let (dst, nxt) = p.split_at_mut(k.len());
                    dst.copy_from_slice(k.as_ref());
                    p = nxt;
                    op = oth.next();
                } else {
                    // Otherwise, copy the first p.len() bytes from k, and
                    // modify op to keep the rest of the bytes in k.
                    let (src, nxt) = k.split_at(p.len());
                    p.copy_from_slice(src.as_ref());
                    op = Some(nxt);
                    // break from loop, p had all its bytes copied.
                    break;
                }
            }
        }
        // If anything is left in our iterator, panic if it isn't an empty slice
        // since the lengths must match.
        while let Some(v) = op {
            assert_eq!(v.len(), 0);
            op = oth.next();
        }
    }

    /// Copies elements from one part of the `FragmentedByteSlice` to another
    /// part of itself.
    ///
    /// `src` is the range within `self` to copy from. `dst` is the starting
    /// index of the range within `self` to copy to, which will have the same
    /// length as `src`. The two ranges may overlap. The ends of the two ranges
    /// must be less than or equal to `self.len()`.
    ///
    /// # Panics
    ///
    /// Panics if either the source or destination range is out of bounds, or if
    /// `src` is nonsensical (its end precedes its start).
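    /// # Example
    ///
    /// A minimal sketch of a copy across a fragment boundary; the `use` path
    /// assumes this module is exported as `packet::fragmented`:
    ///
    /// ```
    /// use packet::fragmented::AsFragmentedByteSlice; // assumed export path
    ///
    /// let mut data = [1u8, 2, 3, 4, 5];
    /// let (a, b) = data.split_at_mut(2);
    /// let mut frags = [a, b];
    /// let mut bytes = frags.as_fragmented_byte_slice();
    /// // Copy the last two bytes over the first two, across the fragment
    /// // boundary.
    /// bytes.copy_within(3..5, 0);
    /// assert_eq!(&bytes.to_flattened_vec()[..], &[4, 5, 3, 4, 5]);
    /// ```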
    pub fn copy_within<R: RangeBounds<usize>>(&mut self, src: R, dst: usize) {
        let Range { start, end } = canonicalize_range(self.len(), &src);
        assert!(end >= start);
        let len = end - start;
        if start == dst || len == 0 {
            // no work to do
        } else if start > dst {
            // copy front to back
            let mut start = self.get_index(start);
            let mut dst = self.get_index(dst);
            for _ in 0..len {
                self.0[dst.0][dst.1] = self.0[start.0][start.1];
                self.increment_index(&mut start);
                self.increment_index(&mut dst);
            }
        } else {
            // copy back to front
            let mut start = self.get_index(end - 1);
            let mut dst = self.get_index(dst + len - 1);
            for _ in 0..len {
                self.0[dst.0][dst.1] = self.0[start.0][start.1];
                self.decrement_index(&mut start);
                self.decrement_index(&mut dst);
            }
        }
    }

    /// Attempts to get a contiguous mutable reference to this
    /// `FragmentedByteSlice`.
    ///
    /// Returns `Some` if this `FragmentedByteSlice` is a single contiguous part
    /// (or is empty). Returns `None` otherwise.
    pub fn try_get_contiguous_mut(&mut self) -> Option<&mut [u8]> {
        match &mut self.0 {
            [] => Some(&mut []),
            [slc] => Some(slc),
            _ => None,
        }
    }
}

/// A [`FragmentedByteSlice`] backed by immutable byte slices.
pub type FragmentedBytes<'a, 'b> = FragmentedByteSlice<'a, &'b [u8]>;
/// A [`FragmentedByteSlice`] backed by mutable byte slices.
pub type FragmentedBytesMut<'a, 'b> = FragmentedByteSlice<'a, &'b mut [u8]>;

#[cfg(test)]
mod tests {
    use super::*;

    /// Calls `f` with all the possible three way slicings of a non-mutable
    /// buffer containing `[1,2,3,4,5]` (including cases with empty slices).
    fn with_fragments<F: for<'a, 'b> FnMut(FragmentedBytes<'a, 'b>)>(mut f: F) {
        let buff = [1_u8, 2, 3, 4, 5];
        for i in 0..buff.len() {
            for j in i..buff.len() {
                let (a, x) = buff.split_at(i);
                let (b, c) = x.split_at(j - i);
                let mut frags = [a, b, c];
                f(frags.as_fragmented_byte_slice());
            }
        }
    }

    /// Calls `f` with all the possible three way slicings of a mutable buffer
    /// containing `[1,2,3,4,5]` (including cases with empty slices).
    fn with_fragments_mut<F: for<'a, 'b> FnMut(FragmentedBytesMut<'a, 'b>)>(mut f: F) {
        let buff = [1_u8, 2, 3, 4, 5];
        for i in 0..buff.len() {
            for j in i..buff.len() {
                let mut buff = [1_u8, 2, 3, 4, 5];
                let (a, x) = buff.split_at_mut(i);
                let (b, c) = x.split_at_mut(j - i);
                let mut frags = [a, b, c];
                f(frags.as_fragmented_byte_slice());
            }
        }
    }

    #[test]
    fn test_iter() {
        // check iterator over different fragment permutations.
        with_fragments(|bytes| {
            let mut iter = bytes.iter();
            for i in 1_u8..6 {
                assert_eq!(iter.next().unwrap(), i);
            }
            assert!(iter.next().is_none());
            assert!(iter.next().is_none());
        });
    }

    #[test]
    fn test_eq() {
        // check equality over different fragment permutations.
        with_fragments(|bytes| {
            assert!(bytes.eq_slice([1_u8, 2, 3, 4, 5].as_ref()));
            assert!(!bytes.eq_slice([1_u8, 2, 3, 4].as_ref()));
            assert!(!bytes.eq_slice([1_u8, 2, 3, 4, 5, 6].as_ref()));
            assert!(!bytes.eq_slice(&[]));
        });

        // check equality for the empty slice case.
        let bytes = FragmentedBytes::new_empty();
        assert!(!bytes.eq_slice([1_u8, 2, 3, 4, 5].as_ref()));
        assert!(bytes.eq_slice(&[]));
    }

    #[test]
    fn test_slice() {
        // test all valid ranges with all possible permutations of a three way
        // slice.
        for i in 0..6 {
            for j in i..6 {
                with_fragments(|bytes| {
                    let range = bytes.slice(i..j);
                    let x = [1_u8, 2, 3, 4, 5];
                    assert_eq!(&range.to_flattened_vec()[..], &x[i..j], "{}..{}", i, j);
                });
            }
        }
    }

    #[test]
    #[should_panic]
    fn test_slice_out_of_range() {
        // check that slicing out of range will panic
        with_fragments(|bytes| {
            let _ = bytes.slice(0..15);
        });
    }

    #[test]
    #[should_panic]
    fn test_copy_into_slice_too_big() {
        // check that copy_into_slice panics for different lengths.
        with_fragments(|bytes| {
            let mut slice = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10];
            bytes.copy_into_slice(&mut slice[..]);
        });
    }

    #[test]
    #[should_panic]
    fn test_copy_into_slice_too_small() {
        // check that copy_into_slice panics for different lengths.
        with_fragments(|bytes| {
            let mut slice = [1, 2];
            bytes.copy_into_slice(&mut slice[..]);
        });
    }

    #[test]
    fn test_copy_into_slice() {
        // try copy_into_slice with all different fragment permutations.
        with_fragments(|bytes| {
            let mut slice = [0; 5];
            bytes.copy_into_slice(&mut slice[..]);
            assert_eq!(slice, &[1, 2, 3, 4, 5][..]);
        });
    }

    #[test]
    #[should_panic]
    fn test_copy_from_slice_too_big() {
        // check that copy_from_slice panics for different lengths.
        with_fragments_mut(|mut bytes| {
            let slice = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10];
            bytes.copy_from_slice(&slice[..]);
        });
    }

    #[test]
    #[should_panic]
    fn test_copy_from_slice_too_small() {
        // check that copy_from_slice panics for different lengths.
        with_fragments_mut(|mut bytes| {
            let slice = [1, 2, 3];
            bytes.copy_from_slice(&slice[..]);
        });
    }

    #[test]
    fn test_copy_from_slice() {
        // test copy_from_slice with all fragment permutations.
        with_fragments_mut(|mut bytes| {
            let slice = [10, 20, 30, 40, 50];
            bytes.copy_from_slice(&slice[..]);
            assert_eq!(&bytes.to_flattened_vec()[..], &slice[..]);
        });
    }

    #[test]
    fn test_copy_from() {
        // test copying from another FragmentedByteSlice, going over all
        // fragment permutations for both src and dst.
        with_fragments(|src| {
            with_fragments_mut(|mut dst| {
                // zero out dst.
                dst.copy_from_slice(&[0; 5][..]);
                dst.copy_from(&src);
                assert_eq!(&dst.to_flattened_vec()[..], &[1_u8, 2, 3, 4, 5][..]);
            })
        });
    }

    #[test]
    #[should_panic]
    fn test_copy_from_too_long() {
        // copying from another FragmentedByteSlice should panic if the lengths
        // differ.
        let mut a = [0; 2];
        let mut b = [0; 2];
        let mut frags = [a.as_mut(), b.as_mut()];
        with_fragments(|src| {
            frags.as_fragmented_byte_slice().copy_from(&src);
        });
    }

    #[test]
    #[should_panic]
    fn test_copy_from_too_short() {
        // copying from another FragmentedByteSlice should panic if the lengths
        // differ.
        let mut a = [0; 5];
        let mut b = [0; 2];
        let mut frags = [a.as_mut(), b.as_mut()];
        with_fragments(|src| {
            frags.as_fragmented_byte_slice().copy_from(&src);
        });
    }

    #[test]
    fn test_indexing() {
        // Test the internal indexing functions over all fragment permutations.
        with_fragments(|bytes| {
            for i in 0..5 {
                // check that get_index addresses the expected byte.
                let mut idx = bytes.get_index(i);
                assert_eq!(bytes.0[idx.0][idx.1], (i + 1) as u8);

                // check that we can increase it correctly until the end of the
                // buffer.
                for j in 1..(6 - i - 1) {
                    bytes.increment_index(&mut idx);
                    assert_eq!(bytes.0[idx.0][idx.1], (i + j + 1) as u8);
                }

                // fetch the same index again.
                let mut idx = bytes.get_index(i);
                assert_eq!(bytes.0[idx.0][idx.1], (i + 1) as u8);

                // check that we can decrease it correctly until the beginning
                // of the buffer.
                for j in 1..=i {
                    bytes.decrement_index(&mut idx);
                    assert_eq!(bytes.0[idx.0][idx.1], (i - j + 1) as u8);
                }
            }
        });
    }

    #[test]
    fn test_copy_within() {
        with_fragments_mut(|mut bytes| {
            // copy the last two bytes to the beginning:
            bytes.copy_within(3..5, 0);
            assert_eq!(&bytes.to_flattened_vec()[..], &[4, 5, 3, 4, 5]);
        });
        with_fragments_mut(|mut bytes| {
            // copy the first two bytes to the end:
            bytes.copy_within(0..2, 3);
            assert_eq!(&bytes.to_flattened_vec()[..], &[1, 2, 3, 1, 2]);
        });
    }

    #[test]
    #[should_panic]
    fn test_copy_within_src_out_of_bounds() {
        with_fragments_mut(|mut bytes| {
            // try to copy out of bounds
            bytes.copy_within(3..15, 0);
        });
    }

    #[test]
    #[should_panic]
    fn test_copy_within_dst_out_of_bounds() {
        with_fragments_mut(|mut bytes| {
            // try to copy out of bounds
            bytes.copy_within(3..5, 15);
        });
    }

    #[test]
    #[should_panic]
    fn test_copy_within_bad_range() {
        with_fragments_mut(|mut bytes| {
            // pass a bad range (end before start)
            #[allow(clippy::reversed_empty_ranges)]
            bytes.copy_within(5..3, 0);
        });
    }

    #[test]
    fn test_get_contiguous() {
        // If we have fragments, get_contiguous should fail:
        with_fragments_mut(|mut bytes| {
            assert!(bytes.try_get_contiguous().is_none());
            assert!(bytes.try_get_contiguous_mut().is_none());
            assert!(bytes.try_into_contiguous().is_err());
        });

        // otherwise we should be able to get the contiguous bytes:
        let mut single = [1_u8, 2, 3, 4, 5];
        let mut single = [&mut single[..]];
        let mut single = single.as_fragmented_byte_slice();
        assert_eq!(single.try_get_contiguous().unwrap(), &[1, 2, 3, 4, 5][..]);
        assert_eq!(single.try_get_contiguous_mut().unwrap(), &[1, 2, 3, 4, 5][..]);
        assert_eq!(single.try_into_contiguous().unwrap(), &[1, 2, 3, 4, 5][..]);
    }

    #[test]
    fn test_split_contiguous() {
        let data = [1_u8, 2, 3, 4, 5, 6];

        // try with a single contiguous slice
        let mut refs = [&data[..]];
        let frag = refs.as_fragmented_byte_slice();
        let (head, body, foot) = frag.try_split_contiguous(2..4).unwrap();
        assert_eq!(head, &data[..2]);
        assert_eq!(&body.to_flattened_vec()[..], &data[2..4]);
        assert_eq!(foot, &data[4..]);

        // try splitting just part of the header
        let mut refs = [&data[0..3], &data[3..]];
        let frag = refs.as_fragmented_byte_slice();
        let (head, body, foot) = frag.try_split_contiguous(2..6).unwrap();
        assert_eq!(head, &data[..2]);
        assert_eq!(&body.to_flattened_vec()[..], &data[2..]);
        assert!(foot.is_empty());

        // try splitting just part of the footer
        let mut refs = [&data[0..3], &data[3..]];
        let frag = refs.as_fragmented_byte_slice();
        let (head, body, foot) = frag.try_split_contiguous(..4).unwrap();
        assert!(head.is_empty());
        assert_eq!(&body.to_flattened_vec()[..], &data[..4]);
        assert_eq!(foot, &data[4..]);

        // try completely extracting both:
        let mut refs = [&data[0..3], &data[3..]];
        let frag = refs.as_fragmented_byte_slice();
        let (head, body, foot) = frag.try_split_contiguous(3..3).unwrap();
        assert_eq!(head, &data[0..3]);
        assert_eq!(body.len(), 0);
        assert_eq!(foot, &data[3..]);

        // try getting contiguous bytes from an empty FragmentedByteSlice:
        let frag = FragmentedBytes::new_empty();
        let (head, body, foot) = frag.try_split_contiguous(..).unwrap();
        assert!(head.is_empty());
        assert!(body.is_empty());
        assert!(foot.is_empty());
    }

    #[test]
    #[should_panic]
    fn test_split_contiguous_out_of_bounds() {
        let data = [1_u8, 2, 3, 4, 5, 6];
        let mut refs = [&data[..]];
        let frag = refs.as_fragmented_byte_slice();
        let _ = frag.try_split_contiguous(2..8);
    }

    #[test]
    fn test_empty() {
        // Can create empty FragmentedByteSlices with no fragments or with one
        // empty fragment.
        // is_empty should return true for both cases.
        let empty = FragmentedByteSlice::<&'static [u8]>::new_empty();
        assert!(empty.is_empty());
        let empty = [0_u8; 0];
        let mut empty = [&empty[..]];
        let empty = empty.as_fragmented_byte_slice();
        assert!(empty.is_empty());
    }
}