packet/fragmented.rs

// Copyright 2019 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

use core::ops::{Range, RangeBounds};

use zerocopy::{SplitByteSlice, SplitByteSliceMut};

use crate::{canonicalize_range, take_back, take_back_mut, take_front, take_front_mut};

/// A wrapper for a sequence of byte slices.
///
/// `FragmentedByteSlice` shares its underlying memory with the slice it was
/// constructed from and, as a result, operations on a `FragmentedByteSlice` may
/// mutate the backing slice.
#[derive(Debug, Eq, PartialEq)]
pub struct FragmentedByteSlice<'a, B: SplitByteSlice>(&'a mut [B]);

/// A single byte slice fragment in a [`FragmentedByteSlice`].
pub trait Fragment: SplitByteSlice {
    /// Takes `n` bytes from the front of this fragment.
    ///
    /// After a call to `take_front(n)`, the fragment is `n` bytes shorter.
    ///
    /// # Panics
    ///
    /// Panics if `n` is larger than the length of this fragment.
    fn take_front(&mut self, n: usize) -> Self;

    /// Takes `n` bytes from the back of this fragment.
    ///
    /// After a call to `take_back(n)`, the fragment is `n` bytes shorter.
    ///
    /// # Panics
    ///
    /// Panics if `n` is larger than the length of this fragment.
    fn take_back(&mut self, n: usize) -> Self;

    /// Constructs a new empty `Fragment`.
    fn empty() -> Self;
}

/// A type that can produce a `FragmentedByteSlice` view of itself.
pub trait AsFragmentedByteSlice<B: Fragment> {
    /// Generates a `FragmentedByteSlice` view of `self`.
    fn as_fragmented_byte_slice(&mut self) -> FragmentedByteSlice<'_, B>;
}

impl<O, B> AsFragmentedByteSlice<B> for O
where
    B: Fragment,
    O: AsMut<[B]>,
{
    fn as_fragmented_byte_slice(&mut self) -> FragmentedByteSlice<'_, B> {
        FragmentedByteSlice::new(self.as_mut())
    }
}

impl<'a> Fragment for &'a [u8] {
    fn take_front(&mut self, n: usize) -> Self {
        take_front(self, n)
    }

    fn take_back(&mut self, n: usize) -> Self {
        take_back(self, n)
    }

    fn empty() -> Self {
        &[]
    }
}

impl<'a> Fragment for &'a mut [u8] {
    fn take_front(&mut self, n: usize) -> Self {
        take_front_mut(self, n)
    }

    fn take_back(&mut self, n: usize) -> Self {
        take_back_mut(self, n)
    }

    fn empty() -> Self {
        &mut []
    }
}

impl<'a, B: 'a + Fragment> FragmentedByteSlice<'a, B> {
    /// Constructs a new `FragmentedByteSlice` from `bytes`.
    ///
    /// It is important to note that `FragmentedByteSlice` takes a mutable
    /// reference to a backing slice. Operations on the `FragmentedByteSlice`
    /// may mutate `bytes` as an optimization to avoid extra allocations.
    ///
    /// Users are encouraged to treat slices used to construct
    /// `FragmentedByteSlice`s as if they are not owned anymore and only serve
    /// as (usually temporary) backing for a `FragmentedByteSlice`.
    pub fn new(bytes: &'a mut [B]) -> Self {
        Self(bytes)
    }

    /// Constructs a new empty `FragmentedByteSlice`.
    pub fn new_empty() -> Self {
        Self(&mut [])
    }

    /// Gets the total length, in bytes, of this `FragmentedByteSlice`.
    pub fn len(&self) -> usize {
        // TODO(brunodalbo) explore if caching the total length in a
        // FragmentedByteSlice could be a worthy performance optimization.
        self.0.iter().map(|x| x.len()).sum()
    }

    /// Returns `true` if the `FragmentedByteSlice` is empty.
    pub fn is_empty(&self) -> bool {
        self.len() == 0
    }

    /// Slices this `FragmentedByteSlice`, reducing it to only the bytes within
    /// `range`.
    ///
    /// `slice` will mutate the backing slice by dropping or shrinking fragments
    /// as necessary so the overall composition matches the requested `range`.
    /// The returned `FragmentedByteSlice` uses the same (albeit possibly
    /// modified) backing mutable slice reference as `self`.
    ///
    /// # Panics
    ///
    /// Panics if the provided `range` is not within the bounds of this
    /// `FragmentedByteSlice`, or if the range is nonsensical (the end precedes
    /// the start).
    pub fn slice<R>(self, range: R) -> Self
    where
        R: RangeBounds<usize>,
    {
        let len = self.len();
        let range = canonicalize_range(len, &range);
        let mut bytes = self.0;
        // c is the number of bytes we need to discard from the beginning of
        // the fragments.
        let mut c = range.start;
        while c != 0 {
            let first = &mut bytes[0];
            if first.len() > c {
                // if the first fragment contains more than c bytes, just take
                // c bytes out of its front and we're done.
                let _: B = first.take_front(c);
                break;
            } else {
                // otherwise, just account for the first fragment's entire
                // length and drop it.
                c -= first.len();
                bytes = &mut bytes[1..];
            }
        }
        // c is the number of bytes we need to discard from the end of the
        // fragments.
        let mut c = len - range.end;
        while c != 0 {
            let idx = bytes.len() - 1;
            let last = &mut bytes[idx];
            if last.len() > c {
                // if the last fragment contains more than c bytes, just take
                // c bytes out of its back and we're done.
                let _: B = last.take_back(c);
                break;
            } else {
                // otherwise, just account for the last fragment's entire length
                // and drop it.
                c -= last.len();
                bytes = &mut bytes[..idx];
            }
        }
        Self(bytes)
    }

    /// Checks whether the contents of this `FragmentedByteSlice` are equal to
    /// the contents of `other`.
    pub fn eq_slice(&self, mut other: &[u8]) -> bool {
        for x in self.0.iter() {
            let x = x.as_ref();
            if other.len() < x.len() || !x.eq(&other[..x.len()]) {
                return false;
            }
            other = &other[x.len()..];
        }
        other.is_empty()
    }

    /// Iterates over all the bytes in this `FragmentedByteSlice`.
    pub fn iter(&self) -> impl '_ + Iterator<Item = u8> {
        self.0.iter().flat_map(|x| x.iter()).copied()
    }

    /// Iterates over the fragments of this `FragmentedByteSlice`.
    pub fn iter_fragments(&'a self) -> impl 'a + Iterator<Item = &'a [u8]> + Clone {
        self.0.iter().map(|x| x.as_ref())
    }

    /// Copies all the bytes in `self` into the contiguous slice `dst`.
    ///
    /// # Panics
    ///
    /// Panics if `dst.len() != self.len()`.
    pub fn copy_into_slice(&self, mut dst: &mut [u8]) {
        for p in self.0.iter() {
            let (tgt, nxt) = dst.split_at_mut(p.len());
            tgt.copy_from_slice(p.as_ref());
            dst = nxt;
        }
        assert_eq!(dst.len(), 0);
    }

    /// Returns a flattened version of this buffer, copying its contents into a
    /// [`Vec`].
    pub fn to_flattened_vec(&self) -> Vec<u8> {
        let mut out = Vec::with_capacity(self.len());
        for x in self.0.iter() {
            out.extend_from_slice(x);
        }
        out
    }

    /// Creates an index tuple from a linear index `idx`.
    ///
    /// `get_index` creates a tuple index `(slice, byte)`, where `slice` is the
    /// index in the backing slice of slices and `byte` is the byte index within
    /// `self.0[slice]`, such that `(slice, byte)` identifies the `idx`th byte
    /// in this `FragmentedByteSlice`.
    ///
    /// # Panics
    ///
    /// Panics if `idx` is out of bounds.
    fn get_index(&self, mut idx: usize) -> (usize, usize) {
        let mut a = 0;
        while self.0[a].len() <= idx || self.0[a].len() == 0 {
            idx -= self.0[a].len();
            a += 1;
        }
        (a, idx)
    }

    /// Increments the index tuple `idx`.
    ///
    /// Increments the index tuple `idx` (see
    /// [`FragmentedByteSlice::get_index`]) so it references the next byte.
    /// `increment_index` will stop incrementing and just return a slice index
    /// equal to the length of the backing slice if `idx` can't be incremented
    /// anymore.
    fn increment_index(&self, idx: &mut (usize, usize)) {
        if self.0[idx.0].len() > (idx.1 + 1) {
            idx.1 += 1;
        } else {
            idx.0 += 1;
            // skip any empty slices:
            while idx.0 < self.0.len() && self.0[idx.0].len() == 0 {
                idx.0 += 1;
            }
            idx.1 = 0;
        }
    }

    /// Decrements the index tuple `idx`.
    ///
    /// Decrements the index tuple `idx` (see
    /// [`FragmentedByteSlice::get_index`]) so it references the previous byte.
    /// `decrement_index` will wrap around to an invalid out of bounds index
    /// (slice index is equal to the length of the backing slice) if `idx` is
    /// pointing to the `0`th byte.
    fn decrement_index(&self, idx: &mut (usize, usize)) {
        if idx.1 == 0 {
            if idx.0 == 0 {
                idx.0 = self.0.len();
                idx.1 = 0;
                return;
            }
            idx.0 -= 1;
            // skip any empty slices:
            while idx.0 != 0 && self.0[idx.0].len() == 0 {
                idx.0 -= 1;
            }
            if self.0[idx.0].len() != 0 {
                idx.1 = self.0[idx.0].len() - 1;
            } else {
                idx.0 = self.0.len();
                idx.1 = 0;
            }
        } else {
            idx.1 -= 1;
        }
    }

    /// Tries to convert this `FragmentedByteSlice` into a contiguous one.
    ///
    /// Returns `Ok` if `self`'s backing storage contains 0 or 1 byte slices,
    /// and `Err` otherwise.
    ///
    /// If `self`'s backing storage contains 1 byte slice, that byte slice will
    /// be replaced with an empty byte slice, and the original used to construct
    /// the return value.
    pub fn try_into_contiguous(self) -> Result<B, Self> {
        if self.0.is_empty() {
            Ok(B::empty())
        } else if self.0.len() == 1 {
            Ok(core::mem::replace(&mut self.0[0], B::empty()))
        } else {
            Err(self)
        }
    }

    /// Tries to get a contiguous reference to this `FragmentedByteSlice`.
    ///
    /// Returns `Some` if `self`'s backing storage contains 0 or 1 byte slices,
    /// and `None` otherwise.
    pub fn try_get_contiguous(&self) -> Option<&[u8]> {
        match &self.0 {
            [] => Some(&[]),
            [slc] => Some(slc),
            _ => None,
        }
    }

    /// Tries to split this `FragmentedByteSlice` into a contiguous prefix, a
    /// (possibly fragmented) body, and a contiguous suffix.
    ///
    /// Returns `None` if it isn't possible to form a contiguous prefix and
    /// suffix with the provided `range`.
    ///
    /// # Panics
    ///
    /// Panics if the range is out of bounds, or if the range is nonsensical
    /// (the end precedes the start).
    pub fn try_split_contiguous<R>(self, range: R) -> Option<(B, Self, B)>
    where
        R: RangeBounds<usize>,
    {
        let len = self.len();
        let range = canonicalize_range(len, &range);
        if len == 0 && range.start == 0 && range.end == 0 {
            // If our own length is zero and the requested body range is the
            // empty range starting at zero, return early to avoid the `None`
            // that the call to last_mut() below would produce.
            return Some((B::empty(), FragmentedByteSlice(&mut []), B::empty()));
        }

        // Take the foot first because, if we have a single fragment, taking
        // the head first would throw off the index calculations.

        let foot = self.0.last_mut()?;
        let take = len - range.end;
        if foot.len() < take {
            return None;
        }
        let foot = foot.take_back(take);

        let head = self.0.first_mut()?;
        if head.len() < range.start {
            return None;
        }
        let head = head.take_front(range.start);

        Some((head, self, foot))
    }
}

impl<'a, B: 'a + SplitByteSliceMut + Fragment> FragmentedByteSlice<'a, B> {
    /// Iterates over mutable references to all the bytes in this
    /// `FragmentedByteSlice`.
    pub fn iter_mut(&mut self) -> impl '_ + Iterator<Item = &'_ mut u8> {
        self.0.iter_mut().flat_map(|x| x.iter_mut())
    }

    /// Copies all the bytes in `src` to `self`.
    ///
    /// # Panics
    ///
    /// Panics if `self.len() != src.len()`.
    pub fn copy_from_slice(&mut self, mut src: &[u8]) {
        for p in self.0.iter_mut() {
            let (cur, nxt) = src.split_at(p.len());
            p.as_mut().copy_from_slice(cur);
            src = nxt;
        }
        assert_eq!(src.len(), 0);
    }

    /// Copies all the bytes from another `FragmentedByteSlice` `other` into
    /// `self`.
    ///
    /// # Panics
    ///
    /// Panics if `self.len() != other.len()`.
    pub fn copy_from<BB>(&mut self, other: &FragmentedByteSlice<'_, BB>)
    where
        BB: SplitByteSlice,
    {
        // keep an iterator over the fragments in other.
        let mut oth = other.0.iter().map(|z| z.as_ref());
        // op is the current fragment in other we're copying from.
        let mut op = oth.next();
        for part in self.0.iter_mut() {
            // p is the current fragment in self we're feeding bytes into.
            let mut p = part.as_mut();
            // iterate until this fragment is all consumed.
            while !p.is_empty() {
                // skip any empty slices in other.
                while op.unwrap().is_empty() {
                    op = oth.next();
                }
                // get the current fragment in other.
                let k = op.unwrap();
                if k.len() <= p.len() {
                    // if k has no more bytes than p needs, copy all of k,
                    // advance p past the region that was just updated, and
                    // then fetch the next fragment from other.
                    let (dst, nxt) = p.split_at_mut(k.len());
                    dst.copy_from_slice(k.as_ref());
                    p = nxt;
                    op = oth.next();
                } else {
                    // Otherwise, copy the first p.len() bytes from k, and
                    // modify op to keep the rest of the bytes in k.
                    let (src, nxt) = k.split_at(p.len());
                    p.copy_from_slice(src.as_ref());
                    op = Some(nxt);
                    // break from loop, p had all its bytes copied.
                    break;
                }
            }
        }
        // If anything is left in our iterator, panic if it isn't an empty slice
        // since the lengths must match.
        while let Some(v) = op {
            assert_eq!(v.len(), 0);
            op = oth.next();
        }
    }

    /// Copies elements from one part of the `FragmentedByteSlice` to another
    /// part of itself.
    ///
    /// `src` is the range within `self` to copy from. `dst` is the starting
    /// index of the range within `self` to copy to, which will have the same
    /// length as `src`. The two ranges may overlap. The ends of the two ranges
    /// must be less than or equal to `self.len()`.
    ///
    /// # Panics
    ///
    /// Panics if either the source or destination range is out of bounds, or if
    /// `src` is nonsensical (its end precedes its start).
    pub fn copy_within<R: RangeBounds<usize>>(&mut self, src: R, dst: usize) {
        let Range { start, end } = canonicalize_range(self.len(), &src);
        assert!(end >= start);
        let len = end - start;
        if start == dst || len == 0 {
            // no work to do
        } else if start > dst {
            // copy front to back
            let mut start = self.get_index(start);
            let mut dst = self.get_index(dst);
            for _ in 0..len {
                self.0[dst.0][dst.1] = self.0[start.0][start.1];
                self.increment_index(&mut start);
                self.increment_index(&mut dst);
            }
        } else {
            // copy back to front
            let mut start = self.get_index(end - 1);
            let mut dst = self.get_index(dst + len - 1);
            for _ in 0..len {
                self.0[dst.0][dst.1] = self.0[start.0][start.1];
                self.decrement_index(&mut start);
                self.decrement_index(&mut dst);
            }
        }
    }

    /// Attempts to get a contiguous mutable reference to this
    /// `FragmentedByteSlice`.
    ///
    /// Returns `Some` if this `FragmentedByteSlice` is a single contiguous part
    /// (or is empty). Returns `None` otherwise.
    pub fn try_get_contiguous_mut(&mut self) -> Option<&mut [u8]> {
        match &mut self.0 {
            [] => Some(&mut []),
            [slc] => Some(slc),
            _ => None,
        }
    }
}

/// A [`FragmentedByteSlice`] backed by immutable byte slices.
pub type FragmentedBytes<'a, 'b> = FragmentedByteSlice<'a, &'b [u8]>;
/// A [`FragmentedByteSlice`] backed by mutable byte slices.
pub type FragmentedBytesMut<'a, 'b> = FragmentedByteSlice<'a, &'b mut [u8]>;

#[cfg(test)]
mod tests {
    use super::*;

    /// Calls `f` with all the possible three way slicings of a non-mutable
    /// buffer containing `[1,2,3,4,5]` (including cases with empty slices).
    fn with_fragments<F: for<'a, 'b> FnMut(FragmentedBytes<'a, 'b>)>(mut f: F) {
        let buff = [1_u8, 2, 3, 4, 5];
        for i in 0..buff.len() {
            for j in i..buff.len() {
                let (a, x) = buff.split_at(i);
                let (b, c) = x.split_at(j - i);
                let mut frags = [a, b, c];
                f(frags.as_fragmented_byte_slice());
            }
        }
    }

    /// Calls `f` with all the possible three way slicings of a mutable buffer
    /// containing `[1,2,3,4,5]` (including cases with empty slices).
    fn with_fragments_mut<F: for<'a, 'b> FnMut(FragmentedBytesMut<'a, 'b>)>(mut f: F) {
        let buff = [1_u8, 2, 3, 4, 5];
        for i in 0..buff.len() {
            for j in i..buff.len() {
                let mut buff = [1_u8, 2, 3, 4, 5];
                let (a, x) = buff.split_at_mut(i);
                let (b, c) = x.split_at_mut(j - i);
                let mut frags = [a, b, c];
                f(frags.as_fragmented_byte_slice());
            }
        }
    }

    #[test]
    fn test_iter() {
        // check iterator over different fragment permutations.
        with_fragments(|bytes| {
            let mut iter = bytes.iter();
            for i in 1_u8..6 {
                assert_eq!(iter.next().unwrap(), i);
            }
            assert!(iter.next().is_none());
            assert!(iter.next().is_none());
        });
    }

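    #[test]
    fn test_iter_mut() {
        // illustrative check (sketch) of the mutable counterpart of iter:
        // writes through iter_mut must be visible in the backing fragments.
        with_fragments_mut(|mut bytes| {
            for b in bytes.iter_mut() {
                *b += 10;
            }
            assert_eq!(&bytes.to_flattened_vec()[..], &[11, 12, 13, 14, 15]);
        });
    }
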
    #[test]
    fn test_eq() {
        // check equality over different fragment permutations.
        with_fragments(|bytes| {
            assert!(bytes.eq_slice([1_u8, 2, 3, 4, 5].as_ref()));
            assert!(!bytes.eq_slice([1_u8, 2, 3, 4].as_ref()));
            assert!(!bytes.eq_slice([1_u8, 2, 3, 4, 5, 6].as_ref()));
            assert!(!bytes.eq_slice(&[]));
        });

        // check equality for the empty slice case.
        let bytes = FragmentedBytes::new_empty();
        assert!(!bytes.eq_slice([1_u8, 2, 3, 4, 5].as_ref()));
        assert!(bytes.eq_slice(&[]));
    }

    #[test]
    fn test_slice() {
        // test all valid ranges with all possible permutations of a three way
        // slice.
        for i in 0..6 {
            for j in i..6 {
                with_fragments(|bytes| {
                    let range = bytes.slice(i..j);
                    let x = [1_u8, 2, 3, 4, 5];
                    assert_eq!(&range.to_flattened_vec()[..], &x[i..j], "{}..{}", i, j);
                });
            }
        }
    }

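    #[test]
    fn test_slice_shrinks_fragments() {
        // illustrative check (sketch) of the documented backing-slice
        // mutation: slicing shrinks the first and last fragments so the
        // backing slice covers exactly the requested range.
        let data = [1_u8, 2, 3, 4, 5, 6];
        let mut frags = [&data[..2], &data[2..4], &data[4..]];
        let sliced = frags.as_fragmented_byte_slice().slice(1..5);
        assert_eq!(&sliced.to_flattened_vec()[..], &data[1..5]);
        assert_eq!(sliced.0.len(), 3);
        assert_eq!(sliced.0[0], &data[1..2]);
        assert_eq!(sliced.0[1], &data[2..4]);
        assert_eq!(sliced.0[2], &data[4..5]);
    }
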
    #[test]
    #[should_panic]
    fn test_slice_out_of_range() {
        // check that slicing out of range will panic
        with_fragments(|bytes| {
            let _ = bytes.slice(0..15);
        });
    }

    #[test]
    #[should_panic]
    fn test_copy_into_slice_too_big() {
        // check that copy_into_slice panics for different lengths.
        with_fragments(|bytes| {
            let mut slice = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10];
            bytes.copy_into_slice(&mut slice[..]);
        });
    }

    #[test]
    #[should_panic]
    fn test_copy_into_slice_too_small() {
        // check that copy_into_slice panics for different lengths.
        with_fragments(|bytes| {
            let mut slice = [1, 2];
            bytes.copy_into_slice(&mut slice[..]);
        });
    }

    #[test]
    fn test_copy_into_slice() {
        // try copy_into_slice with all different fragment permutations.
        with_fragments(|bytes| {
            let mut slice = [0; 5];
            bytes.copy_into_slice(&mut slice[..]);
            assert_eq!(slice, &[1, 2, 3, 4, 5][..]);
        });
    }

    #[test]
    #[should_panic]
    fn test_copy_from_slice_too_big() {
        // check that copy_from_slice panics for different lengths.
        with_fragments_mut(|mut bytes| {
            let slice = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10];
            bytes.copy_from_slice(&slice[..]);
        });
    }

    #[test]
    #[should_panic]
    fn test_copy_from_slice_too_small() {
        // check that copy_from_slice panics for different lengths.
        with_fragments_mut(|mut bytes| {
            let slice = [1, 2, 3];
            bytes.copy_from_slice(&slice[..]);
        });
    }

    #[test]
    fn test_copy_from_slice() {
        // test copy_from_slice with all fragment permutations.
        with_fragments_mut(|mut bytes| {
            let slice = [10, 20, 30, 40, 50];
            bytes.copy_from_slice(&slice[..]);
            assert_eq!(&bytes.to_flattened_vec()[..], &slice[..]);
        });
    }

    #[test]
    fn test_copy_from() {
        // test copying from another FragmentedByteSlice, going over all
        // fragment permutations for both src and dst.
        with_fragments(|src| {
            with_fragments_mut(|mut dst| {
                // zero out dst
                dst.copy_from_slice(&[0; 5][..]);
                dst.copy_from(&src);
                assert_eq!(&dst.to_flattened_vec()[..], &[1_u8, 2, 3, 4, 5][..]);
            })
        });
    }

    #[test]
    #[should_panic]
    fn test_copy_from_too_long() {
        // copying from another FragmentedByteSlice should panic if the lengths
        // differ.
        let mut a = [0; 2];
        let mut b = [0; 2];
        let mut frags = [a.as_mut(), b.as_mut()];
        with_fragments(|src| {
            frags.as_fragmented_byte_slice().copy_from(&src);
        });
    }

    #[test]
    #[should_panic]
    fn test_copy_from_too_short() {
        // copying from another FragmentedByteSlice should panic if the lengths
        // differ.
        let mut a = [0; 5];
        let mut b = [0; 2];
        let mut frags = [a.as_mut(), b.as_mut()];
        with_fragments(|src| {
            frags.as_fragmented_byte_slice().copy_from(&src);
        });
    }

    #[test]
    fn test_indexing() {
        // Test the internal indexing functions over all fragment permutations.
        with_fragments(|bytes| {
            for i in 0..5 {
                // check that get_index addresses the expected byte.
                let mut idx = bytes.get_index(i);
                assert_eq!(bytes.0[idx.0][idx.1], (i + 1) as u8);

                // check that we can increase it correctly until the end of the
                // buffer.
                for j in 1..(6 - i - 1) {
                    bytes.increment_index(&mut idx);
                    assert_eq!(bytes.0[idx.0][idx.1], (i + j + 1) as u8);
                }

                // fetch the same index again.
                let mut idx = bytes.get_index(i);
                assert_eq!(bytes.0[idx.0][idx.1], (i + 1) as u8);

                // check that we can decrease it correctly until the beginning
                // of the buffer.
                for j in 1..=i {
                    bytes.decrement_index(&mut idx);
                    assert_eq!(bytes.0[idx.0][idx.1], (i - j + 1) as u8);
                }
            }
        });
    }

    #[test]
    fn test_copy_within() {
        with_fragments_mut(|mut bytes| {
            // copy last half to beginning:
            bytes.copy_within(3..5, 0);
            assert_eq!(&bytes.to_flattened_vec()[..], &[4, 5, 3, 4, 5]);
        });
        with_fragments_mut(|mut bytes| {
            // copy first half to end:
            bytes.copy_within(0..2, 3);
            assert_eq!(&bytes.to_flattened_vec()[..], &[1, 2, 3, 1, 2]);
        });
    }

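    #[test]
    fn test_copy_within_overlapping() {
        // illustrative check (sketch): the documentation allows the source
        // and destination ranges to overlap.
        with_fragments_mut(|mut bytes| {
            // source 1..4 overlaps destination 2..5:
            bytes.copy_within(1..4, 2);
            assert_eq!(&bytes.to_flattened_vec()[..], &[1, 2, 2, 3, 4]);
        });
    }
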
    #[test]
    #[should_panic]
    fn test_copy_within_src_out_of_bounds() {
        with_fragments_mut(|mut bytes| {
            // try to copy out of bounds
            bytes.copy_within(3..15, 0);
        });
    }

    #[test]
    #[should_panic]
    fn test_copy_within_dst_out_of_bounds() {
        with_fragments_mut(|mut bytes| {
            // try to copy out of bounds
            bytes.copy_within(3..5, 15);
        });
    }

    #[test]
    #[should_panic]
    fn test_copy_within_bad_range() {
        with_fragments_mut(|mut bytes| {
            // pass a bad range (end before start)
            #[allow(clippy::reversed_empty_ranges)]
            bytes.copy_within(5..3, 0);
        });
    }

    #[test]
    fn test_get_contiguous() {
        // If we have fragments, get_contiguous should fail:
        with_fragments_mut(|mut bytes| {
            assert!(bytes.try_get_contiguous().is_none());
            assert!(bytes.try_get_contiguous_mut().is_none());
            assert!(bytes.try_into_contiguous().is_err());
        });

        // otherwise we should be able to get the contiguous bytes:
        let mut single = [1_u8, 2, 3, 4, 5];
        let mut single = [&mut single[..]];
        let mut single = single.as_fragmented_byte_slice();
        assert_eq!(single.try_get_contiguous().unwrap(), &[1, 2, 3, 4, 5][..]);
        assert_eq!(single.try_get_contiguous_mut().unwrap(), &[1, 2, 3, 4, 5][..]);
        assert_eq!(single.try_into_contiguous().unwrap(), &[1, 2, 3, 4, 5][..]);
    }

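    #[test]
    fn test_try_into_contiguous_takes_fragment() {
        // illustrative check (sketch) of the documented behavior: converting a
        // single-fragment FragmentedByteSlice into a contiguous slice replaces
        // the backing fragment with an empty one.
        let data = [1_u8, 2, 3, 4, 5];
        let mut frags = [&data[..]];
        let contiguous = frags.as_fragmented_byte_slice().try_into_contiguous().unwrap();
        assert_eq!(contiguous, &data[..]);
        assert!(frags[0].is_empty());
    }
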
    #[test]
    fn test_split_contiguous() {
        let data = [1_u8, 2, 3, 4, 5, 6];

        // try with a single contiguous slice
        let mut refs = [&data[..]];
        let frag = refs.as_fragmented_byte_slice();
        let (head, body, foot) = frag.try_split_contiguous(2..4).unwrap();
        assert_eq!(head, &data[..2]);
        assert_eq!(&body.to_flattened_vec()[..], &data[2..4]);
        assert_eq!(foot, &data[4..]);

        // try splitting just part of the header
        let mut refs = [&data[0..3], &data[3..]];
        let frag = refs.as_fragmented_byte_slice();
        let (head, body, foot) = frag.try_split_contiguous(2..6).unwrap();
        assert_eq!(head, &data[..2]);
        assert_eq!(&body.to_flattened_vec()[..], &data[2..]);
        assert!(foot.is_empty());

        // try splitting just part of the footer
        let mut refs = [&data[0..3], &data[3..]];
        let frag = refs.as_fragmented_byte_slice();
        let (head, body, foot) = frag.try_split_contiguous(..4).unwrap();
        assert!(head.is_empty());
        assert_eq!(&body.to_flattened_vec()[..], &data[..4]);
        assert_eq!(foot, &data[4..]);

        // try completely extracting both:
        let mut refs = [&data[0..3], &data[3..]];
        let frag = refs.as_fragmented_byte_slice();
        let (head, body, foot) = frag.try_split_contiguous(3..3).unwrap();
        assert_eq!(head, &data[0..3]);
        assert_eq!(body.len(), 0);
        assert_eq!(foot, &data[3..]);

        // try getting contiguous bytes from an empty FragmentedByteSlice:
        let frag = FragmentedBytes::new_empty();
        let (head, body, foot) = frag.try_split_contiguous(..).unwrap();
        assert!(head.is_empty());
        assert!(body.is_empty());
        assert!(foot.is_empty());
    }

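    #[test]
    fn test_split_contiguous_fragmented_prefix() {
        // illustrative check (sketch): try_split_contiguous is documented to
        // return None when the requested contiguous prefix cannot be taken
        // from a single fragment.
        let data = [1_u8, 2, 3, 4, 5, 6];
        let mut refs = [&data[..2], &data[2..]];
        let frag = refs.as_fragmented_byte_slice();
        assert!(frag.try_split_contiguous(3..).is_none());
    }
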
    #[test]
    #[should_panic]
    fn test_split_contiguous_out_of_bounds() {
        let data = [1_u8, 2, 3, 4, 5, 6];
        let mut refs = [&data[..]];
        let frag = refs.as_fragmented_byte_slice();
        let _ = frag.try_split_contiguous(2..8);
    }

    #[test]
    fn test_empty() {
        // Can create empty FragmentedByteSlices with no fragments or with one
        // empty fragment.
        // is_empty should return true for both cases.
        let empty = FragmentedByteSlice::<&'static [u8]>::new_empty();
        assert!(empty.is_empty());
        let empty = [0_u8; 0];
        let mut empty = [&empty[..]];
        let empty = empty.as_fragmented_byte_slice();
        assert!(empty.is_empty());
    }
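
    #[test]
    fn test_iter_fragments() {
        // illustrative check (sketch): iter_fragments yields the backing
        // fragments in order, including empty ones.
        let data = [1_u8, 2, 3, 4, 5];
        let mut frags = [&data[..2], &data[2..2], &data[2..]];
        let bytes = frags.as_fragmented_byte_slice();
        let mut iter = bytes.iter_fragments();
        assert_eq!(iter.next().unwrap(), &data[..2]);
        assert_eq!(iter.next().unwrap(), &data[2..2]);
        assert_eq!(iter.next().unwrap(), &data[2..]);
        assert!(iter.next().is_none());
    }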
}