zerocopy/split_at.rs

// Copyright 2025 The Fuchsia Authors
//
// Licensed under the 2-Clause BSD License <LICENSE-BSD or
// https://opensource.org/license/bsd-2-clause>, Apache License, Version 2.0
// <LICENSE-APACHE or https://www.apache.org/licenses/LICENSE-2.0>, or the MIT
// license <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your option.
// This file may not be copied, modified, or distributed except according to
// those terms.

use super::*;
use crate::pointer::invariant::{Aligned, Exclusive, Invariants, Shared, Valid};

/// Types that can be split in two.
///
/// # Implementation
///
/// **Do not implement this trait yourself!** Instead, use
/// [`#[derive(SplitAt)]`][derive]; e.g.:
///
/// ```
/// # use zerocopy_derive::{SplitAt, KnownLayout};
/// #[derive(SplitAt, KnownLayout)]
/// #[repr(C)]
/// struct MyStruct<T: ?Sized> {
/// # /*
///     ...,
/// # */
///     // `SplitAt` types must have at least one field.
///     field: T,
/// }
/// ```
///
/// This derive performs a sophisticated, compile-time safety analysis to
/// determine whether a type is `SplitAt`.
///
/// # Safety
///
/// This trait does not convey any safety guarantees to code outside this crate.
///
/// You must not rely on the `#[doc(hidden)]` internals of `SplitAt`. Future
/// releases of zerocopy may make backwards-breaking changes to these items,
/// including changes that only affect soundness, which may cause code which
/// uses those items to silently become unsound.
///
#[cfg_attr(feature = "derive", doc = "[derive]: zerocopy_derive::SplitAt")]
#[cfg_attr(
    not(feature = "derive"),
    doc = concat!("[derive]: https://docs.rs/zerocopy/", env!("CARGO_PKG_VERSION"), "/zerocopy/derive.SplitAt.html"),
)]
#[cfg_attr(
    zerocopy_diagnostic_on_unimplemented_1_78_0,
    diagnostic::on_unimplemented(note = "Consider adding `#[derive(SplitAt)]` to `{Self}`")
)]
// # Safety
//
// The trailing slice is well-aligned for its element type. `Self` is `[T]`, or
// a `repr(C)` or `repr(transparent)` slice DST.
pub unsafe trait SplitAt: KnownLayout<PointerMetadata = usize> {
    /// The element type of the trailing slice.
    type Elem;

    #[doc(hidden)]
    fn only_derive_is_allowed_to_implement_this_trait()
    where
        Self: Sized;

    /// Unsafely splits `self` in two.
    ///
    /// # Safety
    ///
    /// The caller promises that `l_len` is not greater than the length of
    /// `self`'s trailing slice.
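    ///
    /// # Examples
    ///
    /// A minimal sketch with an illustrative `Packet` type (assuming the
    /// `derive` feature is enabled); prefer [`SplitAt::split_at`] unless the
    /// bound has already been checked:
    ///
    /// ```
    /// use zerocopy::{SplitAt, FromBytes};
    /// # use zerocopy_derive::*;
    ///
    /// #[derive(SplitAt, FromBytes, KnownLayout, Immutable)]
    /// #[repr(C)]
    /// struct Packet {
    ///     length: u8,
    ///     body: [u8],
    /// }
    ///
    /// let bytes = &[4, 1, 2, 3, 4, 5, 6, 7, 8, 9][..];
    /// let packet = Packet::ref_from_bytes(bytes).unwrap();
    ///
    /// // SAFETY: `4` is not greater than the length of `packet`'s trailing
    /// // slice, which is 9.
    /// let split = unsafe { packet.split_at_unchecked(4) };
    /// let (packet, rest) = split.via_immutable();
    ///
    /// assert_eq!(packet.body, [1, 2, 3, 4]);
    /// assert_eq!(rest, [5, 6, 7, 8, 9]);
    /// ```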
    #[inline]
    #[must_use]
    unsafe fn split_at_unchecked(&self, l_len: usize) -> Split<&Self> {
        // SAFETY: By precondition on the caller, `l_len <= self.len()`.
        unsafe { Split::<&Self>::new(self, l_len) }
    }

    /// Attempts to split `self` in two.
    ///
    /// Returns `None` if `l_len` is greater than the length of `self`'s
    /// trailing slice.
    ///
    /// # Examples
    ///
    /// ```
    /// use zerocopy::{SplitAt, FromBytes};
    /// # use zerocopy_derive::*;
    ///
    /// #[derive(SplitAt, FromBytes, KnownLayout, Immutable)]
    /// #[repr(C)]
    /// struct Packet {
    ///     length: u8,
    ///     body: [u8],
    /// }
    ///
    /// // These bytes encode a `Packet`.
    /// let bytes = &[4, 1, 2, 3, 4, 5, 6, 7, 8, 9][..];
    ///
    /// let packet = Packet::ref_from_bytes(bytes).unwrap();
    ///
    /// assert_eq!(packet.length, 4);
    /// assert_eq!(packet.body, [1, 2, 3, 4, 5, 6, 7, 8, 9]);
    ///
    /// // Attempt to split `packet` at `length`.
    /// let split = packet.split_at(packet.length as usize).unwrap();
    ///
    /// // Use the `Immutable` bound on `Packet` to prove that it's okay to
    /// // return concurrent references to `packet` and `rest`.
    /// let (packet, rest) = split.via_immutable();
    ///
    /// assert_eq!(packet.length, 4);
    /// assert_eq!(packet.body, [1, 2, 3, 4]);
    /// assert_eq!(rest, [5, 6, 7, 8, 9]);
    /// ```
    #[inline]
    #[must_use = "has no side effects"]
    fn split_at(&self, l_len: usize) -> Option<Split<&Self>> {
        MetadataOf::new_in_bounds(self, l_len).map(
            #[inline(always)]
            |l_len| {
                // SAFETY: We have ensured that `l_len <= self.len()` (by
                // post-condition on `MetadataOf::new_in_bounds`)
                unsafe { Split::new(self, l_len.get()) }
            },
        )
    }

    /// Unsafely splits `self` in two.
    ///
    /// # Safety
    ///
    /// The caller promises that `l_len` is not greater than the length of
    /// `self`'s trailing slice.
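    ///
    /// # Examples
    ///
    /// A minimal sketch with an illustrative `Packet` type (assuming the
    /// `derive` feature is enabled); prefer [`SplitAt::split_at_mut`] unless
    /// the bound has already been checked:
    ///
    /// ```
    /// use zerocopy::{SplitAt, FromBytes};
    /// # use zerocopy_derive::*;
    ///
    /// #[derive(SplitAt, FromBytes, KnownLayout, IntoBytes)]
    /// #[repr(C)]
    /// struct Packet {
    ///     length: u8,
    ///     body: [u8],
    /// }
    ///
    /// let bytes = &mut [4, 1, 2, 3, 4, 5, 6, 7, 8, 9][..];
    /// let packet = Packet::mut_from_bytes(bytes).unwrap();
    ///
    /// // SAFETY: `4` is not greater than the length of `packet`'s trailing
    /// // slice, which is 9.
    /// let split = unsafe { packet.split_at_mut_unchecked(4) };
    /// let (packet, rest) = split.via_into_bytes();
    ///
    /// rest.fill(0);
    /// assert_eq!(packet.body, [1, 2, 3, 4]);
    /// ```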
    #[inline]
    #[must_use]
    unsafe fn split_at_mut_unchecked(&mut self, l_len: usize) -> Split<&mut Self> {
        // SAFETY: By precondition on the caller, `l_len <= self.len()`.
        unsafe { Split::<&mut Self>::new(self, l_len) }
    }

    /// Attempts to split `self` in two.
    ///
    /// Returns `None` if `l_len` is greater than the length of `self`'s
    /// trailing slice. If the given `l_len` would result in [the trailing
    /// padding](KnownLayout#slice-dst-layout) of the left portion overlapping
    /// the right portion, that overlap is detected by the methods on the
    /// returned [`Split`], not by this method.
    ///
    /// # Examples
    ///
    /// ```
    /// use zerocopy::{SplitAt, FromBytes};
    /// # use zerocopy_derive::*;
    ///
    /// #[derive(SplitAt, FromBytes, KnownLayout, IntoBytes)]
    /// #[repr(C)]
    /// struct Packet<B: ?Sized> {
    ///     length: u8,
    ///     body: B,
    /// }
    ///
    /// // These bytes encode a `Packet`.
    /// let mut bytes = &mut [4, 1, 2, 3, 4, 5, 6, 7, 8, 9][..];
    ///
    /// let packet = Packet::<[u8]>::mut_from_bytes(bytes).unwrap();
    ///
    /// assert_eq!(packet.length, 4);
    /// assert_eq!(packet.body, [1, 2, 3, 4, 5, 6, 7, 8, 9]);
    ///
    /// {
    ///     // Attempt to split `packet` at `length`.
    ///     let split = packet.split_at_mut(packet.length as usize).unwrap();
    ///
    ///     // Use the `IntoBytes` bound on `Packet` to prove that it's okay to
    ///     // return concurrent references to `packet` and `rest`.
    ///     let (packet, rest) = split.via_into_bytes();
    ///
    ///     assert_eq!(packet.length, 4);
    ///     assert_eq!(packet.body, [1, 2, 3, 4]);
    ///     assert_eq!(rest, [5, 6, 7, 8, 9]);
    ///
    ///     rest.fill(0);
    /// }
    ///
    /// assert_eq!(packet.length, 4);
    /// assert_eq!(packet.body, [1, 2, 3, 4, 0, 0, 0, 0, 0]);
    /// ```
    #[inline]
    fn split_at_mut(&mut self, l_len: usize) -> Option<Split<&mut Self>> {
        MetadataOf::new_in_bounds(self, l_len).map(
            #[inline(always)]
            |l_len| {
                // SAFETY: We have ensured that `l_len <= self.len()` (by
                // post-condition on `MetadataOf::new_in_bounds`)
                unsafe { Split::new(self, l_len.get()) }
            },
        )
    }
}

// SAFETY: `[T]`'s trailing slice is `[T]`, which is trivially aligned.
unsafe impl<T> SplitAt for [T] {
    type Elem = T;

    #[inline]
    #[allow(dead_code)]
    fn only_derive_is_allowed_to_implement_this_trait()
    where
        Self: Sized,
    {
    }
}

/// A `T` that has been split into two possibly-overlapping parts.
///
/// For some dynamically sized types, the padding that appears after the
/// trailing slice field [is a dynamic function of the trailing slice
/// length](KnownLayout#slice-dst-layout). If `T` is split at a length that
/// requires trailing padding, the trailing padding of the left part of the
/// split `T` will overlap the right part. If `T` is a mutable reference or
/// permits interior mutation, you must ensure that the left and right parts do
/// not overlap. You can do this at zero cost using [`Self::via_immutable`],
/// [`Self::via_into_bytes`], or [`Self::via_unaligned`], or with a dynamic
/// check by using [`Self::via_runtime_check`].
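///
/// # Examples
///
/// A condensed sketch of the overlap case (assuming the `derive` feature is
/// enabled): splitting an alignment-2 type at an odd byte length leaves one
/// byte of trailing padding in the left part, which overlaps the right part,
/// so the dynamic check rejects the split.
///
/// ```
/// use zerocopy::{SplitAt, FromBytes, IntoBytes, network_endian::U16};
/// # use zerocopy_derive::*;
///
/// #[derive(SplitAt, FromBytes, KnownLayout, Immutable)]
/// #[repr(C, align(2))]
/// struct Packet {
///     length: U16,
///     body: [u8],
/// }
///
/// // These bytes encode a `Packet` with a 4-byte body.
/// let bytes = [4u16.to_be(), 1u16.to_be(), 2u16.to_be()];
/// let packet = Packet::ref_from_bytes(bytes.as_bytes()).unwrap();
///
/// // Splitting at 4 body bytes makes the left part 6 bytes long, a multiple
/// // of its alignment (2), so no trailing padding is needed and the dynamic
/// // check succeeds.
/// assert!(packet.split_at(4).unwrap().via_runtime_check().is_ok());
///
/// // Splitting at 3 body bytes makes the left part 5 bytes long, so one byte
/// // of trailing padding would overlap the right part and the dynamic check
/// // fails.
/// assert!(packet.split_at(3).unwrap().via_runtime_check().is_err());
/// ```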
#[derive(Debug)]
pub struct Split<T> {
    /// A pointer to the source slice DST.
    source: T,
    /// The length of the future left half of `source`.
    ///
    /// # Safety
    ///
    /// If `source` is a pointer to a slice DST, `l_len` is no greater than
    /// `source`'s length.
    l_len: usize,
}

impl<T> Split<T> {
    /// Produces a `Split` of `source` with `l_len`.
    ///
    /// # Safety
    ///
    /// `l_len` is no greater than `source`'s length.
    #[inline(always)]
    unsafe fn new(source: T, l_len: usize) -> Self {
        Self { source, l_len }
    }
}

impl<'a, T> Split<&'a T>
where
    T: ?Sized + SplitAt,
{
    #[inline(always)]
    fn into_ptr(self) -> Split<Ptr<'a, T, (Shared, Aligned, Valid)>> {
        let source = Ptr::from_ref(self.source);
        // SAFETY: `Ptr::from_ref(self.source)` points to exactly `self.source`
        // and thus maintains the invariants of `self` with respect to `l_len`.
        unsafe { Split::new(source, self.l_len) }
    }

    /// Produces the split parts of `self`, using [`Immutable`] to ensure that
    /// it is sound to have concurrent references to both parts.
    ///
    /// # Examples
    ///
    /// ```
    /// use zerocopy::{SplitAt, FromBytes};
    /// # use zerocopy_derive::*;
    ///
    /// #[derive(SplitAt, FromBytes, KnownLayout, Immutable)]
    /// #[repr(C)]
    /// struct Packet {
    ///     length: u8,
    ///     body: [u8],
    /// }
    ///
    /// // These bytes encode a `Packet`.
    /// let bytes = &[4, 1, 2, 3, 4, 5, 6, 7, 8, 9][..];
    ///
    /// let packet = Packet::ref_from_bytes(bytes).unwrap();
    ///
    /// assert_eq!(packet.length, 4);
    /// assert_eq!(packet.body, [1, 2, 3, 4, 5, 6, 7, 8, 9]);
    ///
    /// // Attempt to split `packet` at `length`.
    /// let split = packet.split_at(packet.length as usize).unwrap();
    ///
    /// // Use the `Immutable` bound on `Packet` to prove that it's okay to
    /// // return concurrent references to `packet` and `rest`.
    /// let (packet, rest) = split.via_immutable();
    ///
    /// assert_eq!(packet.length, 4);
    /// assert_eq!(packet.body, [1, 2, 3, 4]);
    /// assert_eq!(rest, [5, 6, 7, 8, 9]);
    /// ```
    #[must_use = "has no side effects"]
    #[inline(always)]
    pub fn via_immutable(self) -> (&'a T, &'a [T::Elem])
    where
        T: Immutable,
    {
        let (l, r) = self.into_ptr().via_immutable();
        (l.as_ref(), r.as_ref())
    }

    /// Produces the split parts of `self`, using [`IntoBytes`] to ensure that
    /// it is sound to have concurrent references to both parts.
    ///
    /// # Examples
    ///
    /// ```
    /// use zerocopy::{SplitAt, FromBytes};
    /// # use zerocopy_derive::*;
    ///
    /// #[derive(SplitAt, FromBytes, KnownLayout, Immutable, IntoBytes)]
    /// #[repr(C)]
    /// struct Packet<B: ?Sized> {
    ///     length: u8,
    ///     body: B,
    /// }
    ///
    /// // These bytes encode a `Packet`.
    /// let bytes = &[4, 1, 2, 3, 4, 5, 6, 7, 8, 9][..];
    ///
    /// let packet = Packet::<[u8]>::ref_from_bytes(bytes).unwrap();
    ///
    /// assert_eq!(packet.length, 4);
    /// assert_eq!(packet.body, [1, 2, 3, 4, 5, 6, 7, 8, 9]);
    ///
    /// // Attempt to split `packet` at `length`.
    /// let split = packet.split_at(packet.length as usize).unwrap();
    ///
    /// // Use the `IntoBytes` bound on `Packet` to prove that it's okay to
    /// // return concurrent references to `packet` and `rest`.
    /// let (packet, rest) = split.via_into_bytes();
    ///
    /// assert_eq!(packet.length, 4);
    /// assert_eq!(packet.body, [1, 2, 3, 4]);
    /// assert_eq!(rest, [5, 6, 7, 8, 9]);
    /// ```
    #[must_use = "has no side effects"]
    #[inline(always)]
    pub fn via_into_bytes(self) -> (&'a T, &'a [T::Elem])
    where
        T: IntoBytes,
    {
        let (l, r) = self.into_ptr().via_into_bytes();
        (l.as_ref(), r.as_ref())
    }

    /// Produces the split parts of `self`, using [`Unaligned`] to ensure that
    /// it is sound to have concurrent references to both parts.
    ///
    /// # Examples
    ///
    /// ```
    /// use zerocopy::{SplitAt, FromBytes};
    /// # use zerocopy_derive::*;
    ///
    /// #[derive(SplitAt, FromBytes, KnownLayout, Immutable, Unaligned)]
    /// #[repr(C)]
    /// struct Packet {
    ///     length: u8,
    ///     body: [u8],
    /// }
    ///
    /// // These bytes encode a `Packet`.
    /// let bytes = &[4, 1, 2, 3, 4, 5, 6, 7, 8, 9][..];
    ///
    /// let packet = Packet::ref_from_bytes(bytes).unwrap();
    ///
    /// assert_eq!(packet.length, 4);
    /// assert_eq!(packet.body, [1, 2, 3, 4, 5, 6, 7, 8, 9]);
    ///
    /// // Attempt to split `packet` at `length`.
    /// let split = packet.split_at(packet.length as usize).unwrap();
    ///
    /// // Use the `Unaligned` bound on `Packet` to prove that it's okay to
    /// // return concurrent references to `packet` and `rest`.
    /// let (packet, rest) = split.via_unaligned();
    ///
    /// assert_eq!(packet.length, 4);
    /// assert_eq!(packet.body, [1, 2, 3, 4]);
    /// assert_eq!(rest, [5, 6, 7, 8, 9]);
    /// ```
    #[must_use = "has no side effects"]
    #[inline(always)]
    pub fn via_unaligned(self) -> (&'a T, &'a [T::Elem])
    where
        T: Unaligned,
    {
        let (l, r) = self.into_ptr().via_unaligned();
        (l.as_ref(), r.as_ref())
    }

    /// Produces the split parts of `self`, using a dynamic check to ensure that
    /// it is sound to have concurrent references to both parts. You should
    /// prefer using [`Self::via_immutable`], [`Self::via_into_bytes`], or
    /// [`Self::via_unaligned`], which have no runtime cost.
    ///
    /// Note that this check is overly conservative if `T` is [`Immutable`]; for
    /// some types, this check will reject some splits which
    /// [`Self::via_immutable`] will accept.
    ///
    /// # Examples
    ///
    /// ```
    /// use zerocopy::{SplitAt, FromBytes, IntoBytes, network_endian::U16};
    /// # use zerocopy_derive::*;
    ///
    /// #[derive(SplitAt, FromBytes, KnownLayout, Immutable, Debug)]
    /// #[repr(C, align(2))]
    /// struct Packet {
    ///     length: U16,
    ///     body: [u8],
    /// }
    ///
    /// // These bytes encode a `Packet`.
    /// let bytes = [
    ///     4u16.to_be(),
    ///     1u16.to_be(),
    ///     2u16.to_be(),
    ///     3u16.to_be(),
    ///     4u16.to_be()
    /// ];
    ///
    /// let packet = Packet::ref_from_bytes(bytes.as_bytes()).unwrap();
    ///
    /// assert_eq!(packet.length, 4);
    /// assert_eq!(packet.body, [0, 1, 0, 2, 0, 3, 0, 4]);
    ///
    /// // Attempt to split `packet` at `length`.
    /// let split = packet.split_at(packet.length.into()).unwrap();
    ///
    /// // Use a dynamic check to prove that it's okay to return concurrent
    /// // references to `packet` and `rest`.
    /// let (packet, rest) = split.via_runtime_check().unwrap();
    ///
    /// assert_eq!(packet.length, 4);
    /// assert_eq!(packet.body, [0, 1, 0, 2]);
    /// assert_eq!(rest, [0, 3, 0, 4]);
    ///
    /// // Attempt to split `packet` at `length - 1`.
    /// let idx = packet.length.get() - 1;
    /// let split = packet.split_at(idx as usize).unwrap();
    ///
    /// // Attempt (and fail) to use a dynamic check to prove that it's okay
    /// // to return concurrent references to `packet` and `rest`. Note that
    /// // this is a case of `via_runtime_check` being overly conservative.
    /// // Although the left and right parts indeed overlap, the `Immutable`
    /// // bound ensures that concurrently referencing these overlapping
    /// // parts is sound.
    /// assert!(split.via_runtime_check().is_err());
    /// ```
    #[must_use = "has no side effects"]
    #[inline(always)]
    pub fn via_runtime_check(self) -> Result<(&'a T, &'a [T::Elem]), Self> {
        match self.into_ptr().via_runtime_check() {
            Ok((l, r)) => Ok((l.as_ref(), r.as_ref())),
            Err(s) => Err(s.into_ref()),
        }
    }

    /// Unsafely produces the split parts of `self`.
    ///
    /// # Safety
    ///
    /// If `T` permits interior mutation, the trailing padding bytes of the left
    /// portion must not overlap the right portion. For some dynamically sized
    /// types, the padding that appears after the trailing slice field [is a
    /// dynamic function of the trailing slice
    /// length](KnownLayout#slice-dst-layout). Thus, for some types, this
    /// condition is dependent on the length of the left portion.
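    ///
    /// # Examples
    ///
    /// A minimal sketch with an illustrative `Packet` type (assuming the
    /// `derive` feature is enabled). `Packet` contains no `UnsafeCell`s, so it
    /// does not permit interior mutation and the safety condition holds for
    /// any in-bounds split (in real code, prefer [`Self::via_immutable`]
    /// here):
    ///
    /// ```
    /// use zerocopy::{SplitAt, FromBytes};
    /// # use zerocopy_derive::*;
    ///
    /// #[derive(SplitAt, FromBytes, KnownLayout, Immutable)]
    /// #[repr(C)]
    /// struct Packet {
    ///     length: u8,
    ///     body: [u8],
    /// }
    ///
    /// let bytes = &[4, 1, 2, 3, 4, 5, 6, 7, 8, 9][..];
    /// let packet = Packet::ref_from_bytes(bytes).unwrap();
    /// let split = packet.split_at(packet.length as usize).unwrap();
    ///
    /// // SAFETY: `Packet` does not permit interior mutation, so the trailing
    /// // padding of the left portion may overlap the right portion.
    /// let (packet, rest) = unsafe { split.via_unchecked() };
    ///
    /// assert_eq!(packet.body, [1, 2, 3, 4]);
    /// assert_eq!(rest, [5, 6, 7, 8, 9]);
    /// ```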
    #[must_use = "has no side effects"]
    #[inline(always)]
    pub unsafe fn via_unchecked(self) -> (&'a T, &'a [T::Elem]) {
        // SAFETY: The aliasing of `self.into_ptr()` is not `Exclusive`, but the
        // caller has promised that if `T` permits interior mutation then the
        // left and right portions of `self` split at `l_len` do not overlap.
        let (l, r) = unsafe { self.into_ptr().via_unchecked() };
        (l.as_ref(), r.as_ref())
    }
}

impl<'a, T> Split<&'a mut T>
where
    T: ?Sized + SplitAt,
{
    #[inline(always)]
    fn into_ptr(self) -> Split<Ptr<'a, T, (Exclusive, Aligned, Valid)>> {
        let source = Ptr::from_mut(self.source);
        // SAFETY: `Ptr::from_mut(self.source)` points to exactly `self.source`,
        // and thus maintains the invariants of `self` with respect to `l_len`.
        unsafe { Split::new(source, self.l_len) }
    }

    /// Produces the split parts of `self`, using [`IntoBytes`] to ensure that
    /// it is sound to have concurrent references to both parts.
    ///
    /// # Examples
    ///
    /// ```
    /// use zerocopy::{SplitAt, FromBytes};
    /// # use zerocopy_derive::*;
    ///
    /// #[derive(SplitAt, FromBytes, KnownLayout, IntoBytes)]
    /// #[repr(C)]
    /// struct Packet<B: ?Sized> {
    ///     length: u8,
    ///     body: B,
    /// }
    ///
    /// // These bytes encode a `Packet`.
    /// let mut bytes = &mut [4, 1, 2, 3, 4, 5, 6, 7, 8, 9][..];
    ///
    /// let packet = Packet::<[u8]>::mut_from_bytes(bytes).unwrap();
    ///
    /// assert_eq!(packet.length, 4);
    /// assert_eq!(packet.body, [1, 2, 3, 4, 5, 6, 7, 8, 9]);
    ///
    /// {
    ///     // Attempt to split `packet` at `length`.
    ///     let split = packet.split_at_mut(packet.length as usize).unwrap();
    ///
    ///     // Use the `IntoBytes` bound on `Packet` to prove that it's okay to
    ///     // return concurrent references to `packet` and `rest`.
    ///     let (packet, rest) = split.via_into_bytes();
    ///
    ///     assert_eq!(packet.length, 4);
    ///     assert_eq!(packet.body, [1, 2, 3, 4]);
    ///     assert_eq!(rest, [5, 6, 7, 8, 9]);
    ///
    ///     rest.fill(0);
    /// }
    ///
    /// assert_eq!(packet.length, 4);
    /// assert_eq!(packet.body, [1, 2, 3, 4, 0, 0, 0, 0, 0]);
    /// ```
    #[must_use = "has no side effects"]
    #[inline(always)]
    pub fn via_into_bytes(self) -> (&'a mut T, &'a mut [T::Elem])
    where
        T: IntoBytes,
    {
        let (l, r) = self.into_ptr().via_into_bytes();
        (l.as_mut(), r.as_mut())
    }

    /// Produces the split parts of `self`, using [`Unaligned`] to ensure that
    /// it is sound to have concurrent references to both parts.
    ///
    /// # Examples
    ///
    /// ```
    /// use zerocopy::{SplitAt, FromBytes};
    /// # use zerocopy_derive::*;
    ///
    /// #[derive(SplitAt, FromBytes, KnownLayout, IntoBytes, Unaligned)]
    /// #[repr(C)]
    /// struct Packet<B: ?Sized> {
    ///     length: u8,
    ///     body: B,
    /// }
    ///
    /// // These bytes encode a `Packet`.
    /// let mut bytes = &mut [4, 1, 2, 3, 4, 5, 6, 7, 8, 9][..];
    ///
    /// let packet = Packet::<[u8]>::mut_from_bytes(bytes).unwrap();
    ///
    /// assert_eq!(packet.length, 4);
    /// assert_eq!(packet.body, [1, 2, 3, 4, 5, 6, 7, 8, 9]);
    ///
    /// {
    ///     // Attempt to split `packet` at `length`.
    ///     let split = packet.split_at_mut(packet.length as usize).unwrap();
    ///
    ///     // Use the `Unaligned` bound on `Packet` to prove that it's okay to
    ///     // return concurrent references to `packet` and `rest`.
    ///     let (packet, rest) = split.via_unaligned();
    ///
    ///     assert_eq!(packet.length, 4);
    ///     assert_eq!(packet.body, [1, 2, 3, 4]);
    ///     assert_eq!(rest, [5, 6, 7, 8, 9]);
    ///
    ///     rest.fill(0);
    /// }
    ///
    /// assert_eq!(packet.length, 4);
    /// assert_eq!(packet.body, [1, 2, 3, 4, 0, 0, 0, 0, 0]);
    /// ```
    #[must_use = "has no side effects"]
    #[inline(always)]
    pub fn via_unaligned(self) -> (&'a mut T, &'a mut [T::Elem])
    where
        T: Unaligned,
    {
        let (l, r) = self.into_ptr().via_unaligned();
        (l.as_mut(), r.as_mut())
    }

    /// Produces the split parts of `self`, using a dynamic check to ensure that
    /// it is sound to have concurrent references to both parts. You should
    /// prefer using [`Self::via_into_bytes`] or [`Self::via_unaligned`], which
    /// have no runtime cost.
    ///
    /// # Examples
    ///
    /// ```
    /// use zerocopy::{SplitAt, FromBytes};
    /// # use zerocopy_derive::*;
    ///
    /// #[derive(SplitAt, FromBytes, KnownLayout, IntoBytes, Debug)]
    /// #[repr(C)]
    /// struct Packet<B: ?Sized> {
    ///     length: u8,
    ///     body: B,
    /// }
    ///
    /// // These bytes encode a `Packet`.
    /// let mut bytes = &mut [4, 1, 2, 3, 4, 5, 6, 7, 8, 9][..];
    ///
    /// let packet = Packet::<[u8]>::mut_from_bytes(bytes).unwrap();
    ///
    /// assert_eq!(packet.length, 4);
    /// assert_eq!(packet.body, [1, 2, 3, 4, 5, 6, 7, 8, 9]);
    ///
    /// {
    ///     // Attempt to split `packet` at `length`.
    ///     let split = packet.split_at_mut(packet.length as usize).unwrap();
    ///
    ///     // Use a dynamic check to prove that it's okay to return concurrent
    ///     // references to `packet` and `rest`.
    ///     let (packet, rest) = split.via_runtime_check().unwrap();
    ///
    ///     assert_eq!(packet.length, 4);
    ///     assert_eq!(packet.body, [1, 2, 3, 4]);
    ///     assert_eq!(rest, [5, 6, 7, 8, 9]);
    ///
    ///     rest.fill(0);
    /// }
    ///
    /// assert_eq!(packet.length, 4);
    /// assert_eq!(packet.body, [1, 2, 3, 4, 0, 0, 0, 0, 0]);
    /// ```
    #[must_use = "has no side effects"]
    #[inline(always)]
    pub fn via_runtime_check(self) -> Result<(&'a mut T, &'a mut [T::Elem]), Self> {
        match self.into_ptr().via_runtime_check() {
            Ok((l, r)) => Ok((l.as_mut(), r.as_mut())),
            Err(s) => Err(s.into_mut()),
        }
    }

    /// Unsafely produces the split parts of `self`.
    ///
    /// # Safety
    ///
    /// The trailing padding bytes of the left portion must not overlap the
    /// right portion. For some dynamically sized types, the padding that
    /// appears after the trailing slice field [is a dynamic function of the
    /// trailing slice length](KnownLayout#slice-dst-layout). Thus, for some
    /// types, this condition is dependent on the length of the left portion.
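    ///
    /// # Examples
    ///
    /// A minimal sketch with an illustrative `Packet` type (assuming the
    /// `derive` feature is enabled). `Packet` is `repr(C)` with alignment 1,
    /// so its left portion never has trailing padding (in real code, prefer
    /// [`Self::via_into_bytes`] here):
    ///
    /// ```
    /// use zerocopy::{SplitAt, FromBytes};
    /// # use zerocopy_derive::*;
    ///
    /// #[derive(SplitAt, FromBytes, KnownLayout, IntoBytes)]
    /// #[repr(C)]
    /// struct Packet {
    ///     length: u8,
    ///     body: [u8],
    /// }
    ///
    /// let bytes = &mut [4, 1, 2, 3, 4, 5, 6, 7, 8, 9][..];
    /// let packet = Packet::mut_from_bytes(bytes).unwrap();
    /// let split = packet.split_at_mut(packet.length as usize).unwrap();
    ///
    /// // SAFETY: `Packet` has alignment 1, so the left portion has no
    /// // trailing padding that could overlap the right portion.
    /// let (packet, rest) = unsafe { split.via_unchecked() };
    ///
    /// rest.fill(0);
    /// assert_eq!(packet.body, [1, 2, 3, 4]);
    /// ```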
    #[must_use = "has no side effects"]
    #[inline(always)]
    pub unsafe fn via_unchecked(self) -> (&'a mut T, &'a mut [T::Elem]) {
        // SAFETY: The aliasing of `self.into_ptr()` is `Exclusive`, and the
        // caller has promised that the left and right portions of `self` split
        // at `l_len` do not overlap.
        let (l, r) = unsafe { self.into_ptr().via_unchecked() };
        (l.as_mut(), r.as_mut())
    }
}

impl<'a, T, I> Split<Ptr<'a, T, I>>
where
    T: ?Sized + SplitAt,
    I: Invariants<Alignment = Aligned, Validity = Valid>,
{
    fn into_ref(self) -> Split<&'a T>
    where
        I: Invariants<Aliasing = Shared>,
    {
        // SAFETY: `self.source.as_ref()` points to exactly the same referent as
        // `self.source` and thus maintains the invariants of `self` with
        // respect to `l_len`.
        unsafe { Split::new(self.source.as_ref(), self.l_len) }
    }

    fn into_mut(self) -> Split<&'a mut T>
    where
        I: Invariants<Aliasing = Exclusive>,
    {
        // SAFETY: `self.source.as_mut()` points to exactly the same referent as
        // `self.source` and thus maintains the invariants of `self` with
        // respect to `l_len`.
        unsafe { Split::new(self.source.unify_invariants().as_mut(), self.l_len) }
    }

    /// Produces the length of `self`'s left part.
    #[inline(always)]
    fn l_len(&self) -> MetadataOf<T> {
        // SAFETY: By invariant on `Split`, `self.l_len` is not greater than the
        // length of `self.source`.
        unsafe { MetadataOf::<T>::new_unchecked(self.l_len) }
    }

    /// Produces the split parts of `self`, using [`Immutable`] to ensure that
    /// it is sound to have concurrent references to both parts.
    #[inline(always)]
    fn via_immutable(self) -> (Ptr<'a, T, I>, Ptr<'a, [T::Elem], I>)
    where
        T: Immutable,
        I: Invariants<Aliasing = Shared>,
    {
        // SAFETY: `Aliasing = Shared` and `T: Immutable`.
        unsafe { self.via_unchecked() }
    }

    /// Produces the split parts of `self`, using [`IntoBytes`] to ensure that
    /// it is sound to have concurrent references to both parts.
    #[inline(always)]
    fn via_into_bytes(self) -> (Ptr<'a, T, I>, Ptr<'a, [T::Elem], I>)
    where
        T: IntoBytes,
    {
        // SAFETY: By `T: IntoBytes`, `T` has no padding for any length.
        // Consequently, `T` can be split into non-overlapping parts at any
        // index.
        unsafe { self.via_unchecked() }
    }

    /// Produces the split parts of `self`, using [`Unaligned`] to ensure that
    /// it is sound to have concurrent references to both parts.
    #[inline(always)]
    fn via_unaligned(self) -> (Ptr<'a, T, I>, Ptr<'a, [T::Elem], I>)
    where
        T: Unaligned,
    {
        // SAFETY: By `T: SplitAt + Unaligned`, `T` is either a slice or a
        // `repr(C)` or `repr(transparent)` slice DST that is well-aligned at
        // any address and length. If `T` is a slice DST with alignment 1,
        // `repr(C)` or `repr(transparent)` ensures that no padding is placed
        // after the final element of the trailing slice. Consequently, `T` can
        // be split into strictly non-overlapping parts at any index.
        unsafe { self.via_unchecked() }
    }

    /// Produces the split parts of `self`, using a dynamic check to ensure that
    /// it is sound to have concurrent references to both parts. You should
    /// prefer using [`Self::via_immutable`], [`Self::via_into_bytes`], or
    /// [`Self::via_unaligned`], which have no runtime cost.
    #[inline(always)]
    fn via_runtime_check(self) -> Result<(Ptr<'a, T, I>, Ptr<'a, [T::Elem], I>), Self> {
        let l_len = self.l_len();
        // TODO(#1290): Once we require `KnownLayout` on all fields, add an
        // `IS_IMMUTABLE` associated const, and add `T::IS_IMMUTABLE ||` to the
        // below check.
        if l_len.padding_needed_for() == 0 {
            // SAFETY: By `T: SplitAt`, `T` is either `[T]`, or a `repr(C)` or
            // `repr(transparent)` slice DST, for which the trailing padding
            // needed to accommodate `l_len` trailing elements is
            // `l_len.padding_needed_for()`. If no trailing padding is required,
            // the left and right parts are strictly non-overlapping.
            Ok(unsafe { self.via_unchecked() })
        } else {
            Err(self)
        }
    }

    /// Unsafely produces the split parts of `self`.
    ///
    /// # Safety
    ///
    /// The caller promises that if `I::Aliasing` is [`Exclusive`] or `T`
    /// permits interior mutation, then `l_len.padding_needed_for() == 0`.
    #[inline(always)]
    unsafe fn via_unchecked(self) -> (Ptr<'a, T, I>, Ptr<'a, [T::Elem], I>) {
        let l_len = self.l_len();
        let inner = self.source.as_inner();

        // SAFETY: By invariant on `Self::l_len`, `l_len` is not greater than
        // the length of `inner`'s trailing slice.
        let (left, right) = unsafe { inner.split_at_unchecked(l_len) };

        // Lemma 0: `left` and `right` conform to the aliasing invariant
        // `I::Aliasing`. Proof: If `I::Aliasing` is `Exclusive` or `T` permits
        // interior mutation, the caller promises that `l_len.padding_needed_for()
        // == 0`. Consequently, by post-condition on `PtrInner::split_at_unchecked`,
        // there is no trailing padding after `left`'s final element that would
        // overlap into `right`. If `I::Aliasing` is shared and `T` forbids interior
        // mutation, then overlap between their referents is permissible.

        // SAFETY:
        // 0. `left` conforms to the aliasing invariant of `I::Aliasing`, by Lemma 0.
        // 1. `left` conforms to the alignment invariant of `I::Alignment`, because
        //    the referents of `left` and `Self` have the same address and type
        //    (and, thus, alignment requirement).
        // 2. `left` conforms to the validity invariant of `I::Validity`, because
        //    neither the type nor the bytes of `left`'s referent have been changed.
        let left = unsafe { Ptr::from_inner(left) };

        // SAFETY:
        // 0. `right` conforms to the aliasing invariant of `I::Aliasing`, by Lemma
        //    0.
        // 1. `right` conforms to the alignment invariant of `I::Alignment`,
        //    because if `ptr` has `I::Alignment = Aligned`, then by invariant on
        //    `T: SplitAt`, the trailing slice of `ptr` (from which `right` is
        //    derived) will also be well-aligned.
        // 2. `right` conforms to the validity invariant of `I::Validity`,
        //    because `right: [T::Elem]` is derived from the trailing slice of
        //    `ptr`, which, by contract on `T: SplitAt::Elem`, has type
        //    `[T::Elem]`. The `left` part cannot be used to invalidate `right`,
        //    because the caller promises that if `I::Aliasing` is `Exclusive`
        //    or `T` permits interior mutation, then `l_len.padding_needed_for()
        //    == 0` and thus the parts will be non-overlapping.
        let right = unsafe { Ptr::from_inner(right) };

        (left, right)
    }
}

#[cfg(test)]
mod tests {
    #[cfg(feature = "derive")]
    #[test]
    fn test_split_at() {
        use crate::{FromBytes, Immutable, IntoBytes, KnownLayout, SplitAt};

        #[derive(FromBytes, KnownLayout, SplitAt, IntoBytes, Immutable, Debug)]
        #[repr(C)]
        struct SliceDst<const OFFSET: usize> {
            prefix: [u8; OFFSET],
            trailing: [u8],
        }

        #[allow(clippy::as_conversions)]
        fn test_split_at<const OFFSET: usize, const BUFFER_SIZE: usize>() {
            // Test `split_at`
            let n: usize = BUFFER_SIZE - OFFSET;
            let arr = [1; BUFFER_SIZE];
            let dst = SliceDst::<OFFSET>::ref_from_bytes(&arr[..]).unwrap();
            for i in 0..=n {
                let (l, r) = dst.split_at(i).unwrap().via_runtime_check().unwrap();
                let l_sum: u8 = l.trailing.iter().sum();
                let r_sum: u8 = r.iter().sum();
                assert_eq!(l_sum, i as u8);
                assert_eq!(r_sum, (n - i) as u8);
                assert_eq!(l_sum + r_sum, n as u8);
            }

            // Test `split_at_mut`
            let n: usize = BUFFER_SIZE - OFFSET;
            let mut arr = [1; BUFFER_SIZE];
            let dst = SliceDst::<OFFSET>::mut_from_bytes(&mut arr[..]).unwrap();
            for i in 0..=n {
                let (l, r) = dst.split_at_mut(i).unwrap().via_runtime_check().unwrap();
                let l_sum: u8 = l.trailing.iter().sum();
                let r_sum: u8 = r.iter().sum();
                assert_eq!(l_sum, i as u8);
                assert_eq!(r_sum, (n - i) as u8);
                assert_eq!(l_sum + r_sum, n as u8);
            }
        }

        test_split_at::<0, 16>();
        test_split_at::<1, 17>();
        test_split_at::<2, 18>();
    }

    #[cfg(feature = "derive")]
    #[test]
    #[allow(clippy::as_conversions)]
    fn test_split_at_overlapping() {
        use crate::{FromBytes, Immutable, IntoBytes, KnownLayout, SplitAt};

        #[derive(FromBytes, KnownLayout, SplitAt, Immutable)]
        #[repr(C, align(2))]
        struct SliceDst {
            prefix: u8,
            trailing: [u8],
        }

        const N: usize = 16;

        let arr = [1u16; N];
        let dst = SliceDst::ref_from_bytes(arr.as_bytes()).unwrap();

        for i in 0..N {
            let split = dst.split_at(i).unwrap().via_runtime_check();
            if i % 2 == 1 {
                assert!(split.is_ok());
            } else {
                assert!(split.is_err());
            }
        }
    }
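
    #[test]
    fn test_split_at_slice() {
        // A small sketch exercising the `SplitAt` impl for `[T]`. Plain slices
        // never have trailing padding, so any in-bounds split is accepted.
        // Fully-qualified syntax is used to avoid the inherent
        // `<[T]>::split_at` method.
        use crate::SplitAt;

        let slice: &[u8] = &[0, 1, 2, 3, 4];

        let (l, r) = SplitAt::split_at(slice, 2).unwrap().via_immutable();
        assert_eq!(l, [0, 1]);
        assert_eq!(r, [2, 3, 4]);

        // Splitting past the end of the slice fails.
        assert!(SplitAt::split_at(slice, 6).is_none());
    }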
}