1// Copyright 2018 The Fuchsia Authors
2//
3// Licensed under the 2-Clause BSD License <LICENSE-BSD or
4// https://opensource.org/license/bsd-2-clause>, Apache License, Version 2.0
5// <LICENSE-APACHE or https://www.apache.org/licenses/LICENSE-2.0>, or the MIT
6// license <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your option.
7// This file may not be copied, modified, or distributed except according to
8// those terms.
9
10// After updating the following doc comment, make sure to run the following
11// command to update `README.md` based on its contents:
12//
13//   cargo -q run --manifest-path tools/Cargo.toml -p generate-readme > README.md
14
15//! *<span style="font-size: 100%; color:grey;">Need more out of zerocopy?
16//! Submit a [customer request issue][customer-request-issue]!</span>*
17//!
18//! ***<span style="font-size: 140%">Fast, safe, <span
19//! style="color:red;">compile error</span>. Pick two.</span>***
20//!
21//! Zerocopy makes zero-cost memory manipulation effortless. We write `unsafe`
22//! so you don't have to.
23//!
24//! *Thanks for using zerocopy 0.8! For an overview of what changes from 0.7,
25//! check out our [release notes][release-notes], which include a step-by-step
26//! guide for upgrading from 0.7.*
27//!
28//! *Have questions? Need help? Ask the maintainers on [GitHub][github-q-a] or
29//! on [Discord][discord]!*
30//!
31//! [customer-request-issue]: https://github.com/google/zerocopy/issues/new/choose
32//! [release-notes]: https://github.com/google/zerocopy/discussions/1680
33//! [github-q-a]: https://github.com/google/zerocopy/discussions/categories/q-a
34//! [discord]: https://discord.gg/MAvWH2R6zk
35//!
36//! # Overview
37//!
38//! ##### Conversion Traits
39//!
40//! Zerocopy provides four derivable traits for zero-cost conversions:
41//! - [`TryFromBytes`] indicates that a type may safely be converted from
42//!   certain byte sequences (conditional on runtime checks)
43//! - [`FromZeros`] indicates that a sequence of zero bytes represents a valid
44//!   instance of a type
45//! - [`FromBytes`] indicates that a type may safely be converted from an
46//!   arbitrary byte sequence
47//! - [`IntoBytes`] indicates that a type may safely be converted *to* a byte
48//!   sequence
49//!
50//! These traits support sized types, slices, and [slice DSTs][slice-dsts].
51//!
52//! [slice-dsts]: KnownLayout#dynamically-sized-types
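//!
//! For example, here is a minimal sketch of a round trip through these traits
//! (this assumes the `derive` feature; the method names follow zerocopy 0.8's
//! conversion APIs, e.g. `read_from_bytes` and `as_bytes`):
//!
//! ```
//! use zerocopy::*;
//! # use zerocopy_derive::*;
//!
//! #[derive(FromBytes, IntoBytes, Immutable, KnownLayout)]
//! #[repr(C)]
//! struct Pair {
//!     x: u8,
//!     y: u8,
//! }
//!
//! // `FromBytes` lets us construct a `Pair` from arbitrary bytes...
//! let pair = Pair::read_from_bytes(&[1u8, 2][..]).unwrap();
//! assert_eq!((pair.x, pair.y), (1, 2));
//!
//! // ...and `IntoBytes` lets us view it back as raw bytes.
//! assert_eq!(pair.as_bytes(), &[1u8, 2][..]);
//! ```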
53//!
54//! ##### Marker Traits
55//!
56//! Zerocopy provides three derivable marker traits that do not provide any
57//! functionality themselves, but are required to call certain methods provided
58//! by the conversion traits:
59//! - [`KnownLayout`] indicates that zerocopy can reason about certain layout
60//!   qualities of a type
61//! - [`Immutable`] indicates that a type is free from interior mutability,
62//!   except by ownership or an exclusive (`&mut`) borrow
63//! - [`Unaligned`] indicates that a type's alignment requirement is 1
64//!
65//! You should generally derive these marker traits whenever possible.
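//!
//! For example, a sketch of how the marker traits unlock by-reference
//! conversions (assuming the `derive` feature; `ref_from_bytes` requires
//! `KnownLayout` and `Immutable` in addition to `FromBytes`):
//!
//! ```
//! use zerocopy::*;
//! # use zerocopy_derive::*;
//!
//! // `Unaligned` additionally guarantees that the cast can never fail due to
//! // misalignment, since `Header`'s alignment is 1.
//! #[derive(FromBytes, KnownLayout, Immutable, Unaligned)]
//! #[repr(C)]
//! struct Header {
//!     tag: u8,
//!     len: u8,
//! }
//!
//! let header = Header::ref_from_bytes(&[7u8, 2][..]).unwrap();
//! assert_eq!(header.tag, 7);
//! ```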
66//!
67//! ##### Conversion Macros
68//!
69//! Zerocopy provides six macros for safe casting between types:
70//!
71//! - ([`try_`][try_transmute])[`transmute`] (conditionally) converts a value of
72//!   one type to a value of another type of the same size
73//! - ([`try_`][try_transmute_mut])[`transmute_mut`] (conditionally) converts a
74//!   mutable reference of one type to a mutable reference of another type of
75//!   the same size
76//! - ([`try_`][try_transmute_ref])[`transmute_ref`] (conditionally) converts a
77//!   mutable or immutable reference of one type to an immutable reference of
78//!   another type of the same size
79//!
80//! These macros perform *compile-time* size and alignment checks, meaning that
81//! unconditional casts have zero cost at runtime. Conditional casts do not need
//! to validate size or alignment at runtime, but do need to validate contents.
83//!
84//! These macros cannot be used in generic contexts. For generic conversions,
85//! use the methods defined by the [conversion traits](#conversion-traits).
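//!
//! For example, a sketch of an unconditional cast with [`transmute`] (the
//! source and destination sizes match, so this compiles; a size mismatch would
//! be a compile-time error):
//!
//! ```
//! use zerocopy::transmute;
//!
//! // Flatten a 2x4 byte array into an 8-byte array.
//! let flat: [u8; 8] = transmute!([[0u8, 1, 2, 3], [4, 5, 6, 7]]);
//! assert_eq!(flat, [0u8, 1, 2, 3, 4, 5, 6, 7]);
//! ```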
86//!
87//! ##### Byteorder-Aware Numerics
88//!
89//! Zerocopy provides byte-order aware integer types that support these
90//! conversions; see the [`byteorder`] module. These types are especially useful
91//! for network parsing.
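//!
//! For example, a sketch of parsing a network packet header in place using
//! byteorder-aware fields (assuming the `derive` feature):
//!
//! ```
//! use zerocopy::byteorder::{NetworkEndian, U16};
//! use zerocopy::*;
//! # use zerocopy_derive::*;
//!
//! #[derive(FromBytes, IntoBytes, KnownLayout, Immutable, Unaligned)]
//! #[repr(C)]
//! struct UdpHeader {
//!     src_port: U16<NetworkEndian>,
//!     dst_port: U16<NetworkEndian>,
//!     length: U16<NetworkEndian>,
//!     checksum: U16<NetworkEndian>,
//! }
//!
//! let bytes = [0u8, 80, 0, 53, 0, 8, 0, 0];
//! let header = UdpHeader::ref_from_bytes(&bytes[..]).unwrap();
//! assert_eq!(header.src_port.get(), 80);
//! assert_eq!(header.length.get(), 8);
//! ```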
92//!
93//! # Cargo Features
94//!
95//! - **`alloc`**
96//!   By default, `zerocopy` is `no_std`. When the `alloc` feature is enabled,
97//!   the `alloc` crate is added as a dependency, and some allocation-related
98//!   functionality is added.
99//!
100//! - **`std`**
101//!   By default, `zerocopy` is `no_std`. When the `std` feature is enabled, the
102//!   `std` crate is added as a dependency (ie, `no_std` is disabled), and
103//!   support for some `std` types is added. `std` implies `alloc`.
104//!
105//! - **`derive`**
106//!   Provides derives for the core marker traits via the `zerocopy-derive`
107//!   crate. These derives are re-exported from `zerocopy`, so it is not
108//!   necessary to depend on `zerocopy-derive` directly.
109//!
110//!   However, you may experience better compile times if you instead directly
111//!   depend on both `zerocopy` and `zerocopy-derive` in your `Cargo.toml`,
112//!   since doing so will allow Rust to compile these crates in parallel. To do
113//!   so, do *not* enable the `derive` feature, and list both dependencies in
114//!   your `Cargo.toml` with the same leading non-zero version number; e.g:
115//!
116//!   ```toml
117//!   [dependencies]
118//!   zerocopy = "0.X"
119//!   zerocopy-derive = "0.X"
120//!   ```
121//!
122//!   To avoid the risk of [duplicate import errors][duplicate-import-errors] if
123//!   one of your dependencies enables zerocopy's `derive` feature, import
124//!   derives as `use zerocopy_derive::*` rather than by name (e.g., `use
125//!   zerocopy_derive::FromBytes`).
126//!
127//! - **`simd`**
128//!   When the `simd` feature is enabled, `FromZeros`, `FromBytes`, and
129//!   `IntoBytes` impls are emitted for all stable SIMD types which exist on the
130//!   target platform. Note that the layout of SIMD types is not yet stabilized,
131//!   so these impls may be removed in the future if layout changes make them
132//!   invalid. For more information, see the Unsafe Code Guidelines Reference
133//!   page on the [layout of packed SIMD vectors][simd-layout].
134//!
135//! - **`simd-nightly`**
136//!   Enables the `simd` feature and adds support for SIMD types which are only
137//!   available on nightly. Since these types are unstable, support for any type
138//!   may be removed at any point in the future.
139//!
140//! - **`float-nightly`**
141//!   Adds support for the unstable `f16` and `f128` types. These types are
142//!   not yet fully implemented and may not be supported on all platforms.
143//!
144//! [duplicate-import-errors]: https://github.com/google/zerocopy/issues/1587
145//! [simd-layout]: https://rust-lang.github.io/unsafe-code-guidelines/layout/packed-simd-vectors.html
146//!
147//! # Security Ethos
148//!
149//! Zerocopy is expressly designed for use in security-critical contexts. We
//! strive to ensure that zerocopy code is sound under Rust's current
151//! memory model, and *any future memory model*. We ensure this by:
152//! - **...not 'guessing' about Rust's semantics.**
153//!   We annotate `unsafe` code with a precise rationale for its soundness that
154//!   cites a relevant section of Rust's official documentation. When Rust's
155//!   documented semantics are unclear, we work with the Rust Operational
156//!   Semantics Team to clarify Rust's documentation.
157//! - **...rigorously testing our implementation.**
158//!   We run tests using [Miri], ensuring that zerocopy is sound across a wide
159//!   array of supported target platforms of varying endianness and pointer
160//!   width, and across both current and experimental memory models of Rust.
161//! - **...formally proving the correctness of our implementation.**
162//!   We apply formal verification tools like [Kani][kani] to prove zerocopy's
163//!   correctness.
164//!
165//! For more information, see our full [soundness policy].
166//!
167//! [Miri]: https://github.com/rust-lang/miri
168//! [Kani]: https://github.com/model-checking/kani
169//! [soundness policy]: https://github.com/google/zerocopy/blob/main/POLICIES.md#soundness
170//!
171//! # Relationship to Project Safe Transmute
172//!
173//! [Project Safe Transmute] is an official initiative of the Rust Project to
174//! develop language-level support for safer transmutation. The Project consults
175//! with crates like zerocopy to identify aspects of safer transmutation that
176//! would benefit from compiler support, and has developed an [experimental,
177//! compiler-supported analysis][mcp-transmutability] which determines whether,
178//! for a given type, any value of that type may be soundly transmuted into
179//! another type. Once this functionality is sufficiently mature, zerocopy
180//! intends to replace its internal transmutability analysis (implemented by our
181//! custom derives) with the compiler-supported one. This change will likely be
182//! an implementation detail that is invisible to zerocopy's users.
183//!
184//! Project Safe Transmute will not replace the need for most of zerocopy's
185//! higher-level abstractions. The experimental compiler analysis is a tool for
186//! checking the soundness of `unsafe` code, not a tool to avoid writing
187//! `unsafe` code altogether. For the foreseeable future, crates like zerocopy
188//! will still be required in order to provide higher-level abstractions on top
189//! of the building block provided by Project Safe Transmute.
190//!
191//! [Project Safe Transmute]: https://rust-lang.github.io/rfcs/2835-project-safe-transmute.html
192//! [mcp-transmutability]: https://github.com/rust-lang/compiler-team/issues/411
193//!
194//! # MSRV
195//!
196//! See our [MSRV policy].
197//!
198//! [MSRV policy]: https://github.com/google/zerocopy/blob/main/POLICIES.md#msrv
199//!
200//! # Changelog
201//!
202//! Zerocopy uses [GitHub Releases].
203//!
204//! [GitHub Releases]: https://github.com/google/zerocopy/releases
205//!
206//! # Thanks
207//!
208//! Zerocopy is maintained by engineers at Google and Amazon with help from
209//! [many wonderful contributors][contributors]. Thank you to everyone who has
210//! lent a hand in making Rust a little more secure!
211//!
212//! [contributors]: https://github.com/google/zerocopy/graphs/contributors
213
214// Sometimes we want to use lints which were added after our MSRV.
215// `unknown_lints` is `warn` by default and we deny warnings in CI, so without
216// this attribute, any unknown lint would cause a CI failure when testing with
217// our MSRV.
218#![allow(unknown_lints, non_local_definitions, unreachable_patterns)]
219#![deny(renamed_and_removed_lints)]
220#![deny(
221    anonymous_parameters,
222    deprecated_in_future,
223    late_bound_lifetime_arguments,
224    missing_copy_implementations,
225    missing_debug_implementations,
226    missing_docs,
227    path_statements,
228    patterns_in_fns_without_body,
229    rust_2018_idioms,
230    trivial_numeric_casts,
231    unreachable_pub,
232    unsafe_op_in_unsafe_fn,
233    unused_extern_crates,
234    // We intentionally choose not to deny `unused_qualifications`. When items
235    // are added to the prelude (e.g., `core::mem::size_of`), this has the
236    // consequence of making some uses trigger this lint on the latest toolchain
237    // (e.g., `mem::size_of`), but fixing it (e.g. by replacing with `size_of`)
238    // does not work on older toolchains.
239    //
240    // We tested a more complicated fix in #1413, but ultimately decided that,
241    // since this lint is just a minor style lint, the complexity isn't worth it
242    // - it's fine to occasionally have unused qualifications slip through,
243    // especially since these do not affect our user-facing API in any way.
244    variant_size_differences
245)]
246#![cfg_attr(
247    __ZEROCOPY_INTERNAL_USE_ONLY_NIGHTLY_FEATURES_IN_TESTS,
248    deny(fuzzy_provenance_casts, lossy_provenance_casts)
249)]
250#![deny(
251    clippy::all,
252    clippy::alloc_instead_of_core,
253    clippy::arithmetic_side_effects,
254    clippy::as_underscore,
255    clippy::assertions_on_result_states,
256    clippy::as_conversions,
257    clippy::correctness,
258    clippy::dbg_macro,
259    clippy::decimal_literal_representation,
260    clippy::double_must_use,
261    clippy::get_unwrap,
262    clippy::indexing_slicing,
263    clippy::missing_inline_in_public_items,
264    clippy::missing_safety_doc,
265    clippy::must_use_candidate,
266    clippy::must_use_unit,
267    clippy::obfuscated_if_else,
268    clippy::perf,
269    clippy::print_stdout,
270    clippy::return_self_not_must_use,
271    clippy::std_instead_of_core,
272    clippy::style,
273    clippy::suspicious,
274    clippy::todo,
275    clippy::undocumented_unsafe_blocks,
276    clippy::unimplemented,
277    clippy::unnested_or_patterns,
278    clippy::unwrap_used,
279    clippy::use_debug
280)]
281// `clippy::incompatible_msrv` (implied by `clippy::suspicious`): This sometimes
282// has false positives, and we test on our MSRV in CI, so it doesn't help us
283// anyway.
284#![allow(clippy::needless_lifetimes, clippy::type_complexity, clippy::incompatible_msrv)]
285#![deny(
286    rustdoc::bare_urls,
287    rustdoc::broken_intra_doc_links,
288    rustdoc::invalid_codeblock_attributes,
289    rustdoc::invalid_html_tags,
290    rustdoc::invalid_rust_codeblocks,
291    rustdoc::missing_crate_level_docs,
292    rustdoc::private_intra_doc_links
293)]
294// In test code, it makes sense to weight more heavily towards concise, readable
295// code over correct or debuggable code.
296#![cfg_attr(any(test, kani), allow(
297    // In tests, you get line numbers and have access to source code, so panic
298    // messages are less important. You also often unwrap a lot, which would
299    // make expect'ing instead very verbose.
300    clippy::unwrap_used,
301    // In tests, there's no harm to "panic risks" - the worst that can happen is
302    // that your test will fail, and you'll fix it. By contrast, panic risks in
    // production code introduce the possibility of code panicking unexpectedly "in
304    // the field".
305    clippy::arithmetic_side_effects,
306    clippy::indexing_slicing,
307))]
308#![cfg_attr(not(any(test, kani, feature = "std")), no_std)]
309#![cfg_attr(
310    all(feature = "simd-nightly", any(target_arch = "x86", target_arch = "x86_64")),
311    feature(stdarch_x86_avx512)
312)]
313#![cfg_attr(
314    all(feature = "simd-nightly", target_arch = "arm"),
315    feature(stdarch_arm_dsp, stdarch_arm_neon_intrinsics)
316)]
317#![cfg_attr(
318    all(feature = "simd-nightly", any(target_arch = "powerpc", target_arch = "powerpc64")),
319    feature(stdarch_powerpc)
320)]
321#![cfg_attr(feature = "float-nightly", feature(f16, f128))]
322#![cfg_attr(doc_cfg, feature(doc_cfg))]
323#![cfg_attr(
324    __ZEROCOPY_INTERNAL_USE_ONLY_NIGHTLY_FEATURES_IN_TESTS,
325    feature(layout_for_ptr, coverage_attribute)
326)]
327
328// This is a hack to allow zerocopy-derive derives to work in this crate. They
329// assume that zerocopy is linked as an extern crate, so they access items from
330// it as `zerocopy::Xxx`. This makes that still work.
331#[cfg(any(feature = "derive", test))]
332extern crate self as zerocopy;
333
334#[doc(hidden)]
335#[macro_use]
336pub mod util;
337
338pub mod byte_slice;
339pub mod byteorder;
340mod deprecated;
341// This module is `pub` so that zerocopy's error types and error handling
342// documentation is grouped together in a cohesive module. In practice, we
343// expect most users to use the re-export of `error`'s items to avoid identifier
344// stuttering.
345pub mod error;
346mod impls;
347#[doc(hidden)]
348pub mod layout;
349mod macros;
350#[doc(hidden)]
351pub mod pointer;
352mod r#ref;
353mod split_at;
354// TODO(#252): If we make this pub, come up with a better name.
355mod wrappers;
356
357pub use crate::byte_slice::*;
358pub use crate::byteorder::*;
359pub use crate::error::*;
360pub use crate::r#ref::*;
361pub use crate::split_at::{Split, SplitAt};
362pub use crate::wrappers::*;
363
364use core::{
365    cell::{Cell, UnsafeCell},
366    cmp::Ordering,
367    fmt::{self, Debug, Display, Formatter},
368    hash::Hasher,
369    marker::PhantomData,
370    mem::{self, ManuallyDrop, MaybeUninit as CoreMaybeUninit},
371    num::{
372        NonZeroI128, NonZeroI16, NonZeroI32, NonZeroI64, NonZeroI8, NonZeroIsize, NonZeroU128,
373        NonZeroU16, NonZeroU32, NonZeroU64, NonZeroU8, NonZeroUsize, Wrapping,
374    },
375    ops::{Deref, DerefMut},
376    ptr::{self, NonNull},
377    slice,
378};
379
380#[cfg(feature = "std")]
381use std::io;
382
383use crate::pointer::invariant::{self, BecauseExclusive};
384
385#[cfg(any(feature = "alloc", test, kani))]
386extern crate alloc;
387#[cfg(any(feature = "alloc", test))]
388use alloc::{boxed::Box, vec::Vec};
389use util::MetadataOf;
390
391#[cfg(any(feature = "alloc", test))]
392use core::alloc::Layout;
393
394// Used by `TryFromBytes::is_bit_valid`.
395#[doc(hidden)]
396pub use crate::pointer::{invariant::BecauseImmutable, Maybe, Ptr};
397// Used by `KnownLayout`.
398#[doc(hidden)]
399pub use crate::layout::*;
400
401// For each trait polyfill, as soon as the corresponding feature is stable, the
402// polyfill import will be unused because method/function resolution will prefer
403// the inherent method/function over a trait method/function. Thus, we suppress
404// the `unused_imports` warning.
405//
406// See the documentation on `util::polyfills` for more information.
407#[allow(unused_imports)]
408use crate::util::polyfills::{self, NonNullExt as _, NumExt as _};
409
410#[rustversion::nightly]
411#[cfg(all(test, not(__ZEROCOPY_INTERNAL_USE_ONLY_NIGHTLY_FEATURES_IN_TESTS)))]
412const _: () = {
413    #[deprecated = "some tests may be skipped due to missing RUSTFLAGS=\"--cfg __ZEROCOPY_INTERNAL_USE_ONLY_NIGHTLY_FEATURES_IN_TESTS\""]
414    const _WARNING: () = ();
415    #[warn(deprecated)]
416    _WARNING
417};
418
419// These exist so that code which was written against the old names will get
420// less confusing error messages when they upgrade to a more recent version of
421// zerocopy. On our MSRV toolchain, the error messages read, for example:
422//
423//   error[E0603]: trait `FromZeroes` is private
424//       --> examples/deprecated.rs:1:15
425//        |
426//   1    | use zerocopy::FromZeroes;
427//        |               ^^^^^^^^^^ private trait
428//        |
429//   note: the trait `FromZeroes` is defined here
430//       --> /Users/josh/workspace/zerocopy/src/lib.rs:1845:5
431//        |
432//   1845 | use FromZeros as FromZeroes;
433//        |     ^^^^^^^^^^^^^^^^^^^^^^^
434//
435// The "note" provides enough context to make it easy to figure out how to fix
436// the error.
437#[allow(unused)]
438use {FromZeros as FromZeroes, IntoBytes as AsBytes, Ref as LayoutVerified};
439
440/// Implements [`KnownLayout`].
441///
442/// This derive analyzes various aspects of a type's layout that are needed for
443/// some of zerocopy's APIs. It can be applied to structs, enums, and unions;
444/// e.g.:
445///
446/// ```
447/// # use zerocopy_derive::KnownLayout;
448/// #[derive(KnownLayout)]
449/// struct MyStruct {
450/// # /*
451///     ...
452/// # */
453/// }
454///
455/// #[derive(KnownLayout)]
456/// enum MyEnum {
457/// #   V00,
458/// # /*
459///     ...
460/// # */
461/// }
462///
463/// #[derive(KnownLayout)]
464/// union MyUnion {
465/// #   variant: u8,
466/// # /*
467///     ...
468/// # */
469/// }
470/// ```
471///
472/// # Limitations
473///
474/// This derive cannot currently be applied to unsized structs without an
475/// explicit `repr` attribute.
476///
477/// Some invocations of this derive run afoul of a [known bug] in Rust's type
478/// privacy checker. For example, this code:
479///
480/// ```compile_fail,E0446
481/// use zerocopy::*;
482/// # use zerocopy_derive::*;
483///
484/// #[derive(KnownLayout)]
485/// #[repr(C)]
486/// pub struct PublicType {
487///     leading: Foo,
488///     trailing: Bar,
489/// }
490///
491/// #[derive(KnownLayout)]
492/// struct Foo;
493///
494/// #[derive(KnownLayout)]
495/// struct Bar;
496/// ```
497///
498/// ...results in a compilation error:
499///
500/// ```text
501/// error[E0446]: private type `Bar` in public interface
502///  --> examples/bug.rs:3:10
503///    |
504/// 3  | #[derive(KnownLayout)]
505///    |          ^^^^^^^^^^^ can't leak private type
506/// ...
507/// 14 | struct Bar;
508///    | ---------- `Bar` declared as private
509///    |
510///    = note: this error originates in the derive macro `KnownLayout` (in Nightly builds, run with -Z macro-backtrace for more info)
511/// ```
512///
513/// This issue arises when `#[derive(KnownLayout)]` is applied to `repr(C)`
514/// structs whose trailing field type is less public than the enclosing struct.
515///
516/// To work around this, mark the trailing field type `pub` and annotate it with
517/// `#[doc(hidden)]`; e.g.:
518///
519/// ```no_run
520/// use zerocopy::*;
521/// # use zerocopy_derive::*;
522///
523/// #[derive(KnownLayout)]
524/// #[repr(C)]
525/// pub struct PublicType {
526///     leading: Foo,
527///     trailing: Bar,
528/// }
529///
530/// #[derive(KnownLayout)]
531/// struct Foo;
532///
533/// #[doc(hidden)]
534/// #[derive(KnownLayout)]
535/// pub struct Bar; // <- `Bar` is now also `pub`
536/// ```
537///
538/// [known bug]: https://github.com/rust-lang/rust/issues/45713
539#[cfg(any(feature = "derive", test))]
540#[cfg_attr(doc_cfg, doc(cfg(feature = "derive")))]
541pub use zerocopy_derive::KnownLayout;
542
543/// Indicates that zerocopy can reason about certain aspects of a type's layout.
544///
545/// This trait is required by many of zerocopy's APIs. It supports sized types,
546/// slices, and [slice DSTs](#dynamically-sized-types).
547///
548/// # Implementation
549///
550/// **Do not implement this trait yourself!** Instead, use
551/// [`#[derive(KnownLayout)]`][derive]; e.g.:
552///
553/// ```
554/// # use zerocopy_derive::KnownLayout;
555/// #[derive(KnownLayout)]
556/// struct MyStruct {
557/// # /*
558///     ...
559/// # */
560/// }
561///
562/// #[derive(KnownLayout)]
563/// enum MyEnum {
564/// # /*
565///     ...
566/// # */
567/// }
568///
569/// #[derive(KnownLayout)]
570/// union MyUnion {
571/// #   variant: u8,
572/// # /*
573///     ...
574/// # */
575/// }
576/// ```
577///
578/// This derive performs a sophisticated analysis to deduce the layout
579/// characteristics of types. You **must** implement this trait via the derive.
580///
581/// # Dynamically-sized types
582///
583/// `KnownLayout` supports slice-based dynamically sized types ("slice DSTs").
584///
585/// A slice DST is a type whose trailing field is either a slice or another
586/// slice DST, rather than a type with fixed size. For example:
587///
588/// ```
589/// #[repr(C)]
590/// struct PacketHeader {
591/// # /*
592///     ...
593/// # */
594/// }
595///
596/// #[repr(C)]
597/// struct Packet {
598///     header: PacketHeader,
599///     body: [u8],
600/// }
601/// ```
602///
603/// It can be useful to think of slice DSTs as a generalization of slices - in
604/// other words, a normal slice is just the special case of a slice DST with
605/// zero leading fields. In particular:
606/// - Like slices, slice DSTs can have different lengths at runtime
607/// - Like slices, slice DSTs cannot be passed by-value, but only by reference
608///   or via other indirection such as `Box`
609/// - Like slices, a reference (or `Box`, or other pointer type) to a slice DST
610///   encodes the number of elements in the trailing slice field
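///
/// For example, a sketch of parsing a slice DST directly from bytes (assuming
/// the `derive` feature and zerocopy 0.8's `ref_from_bytes`):
///
/// ```
/// use zerocopy::*;
/// # use zerocopy_derive::*;
///
/// #[derive(FromBytes, KnownLayout, Immutable)]
/// #[repr(C)]
/// struct PacketHeader {
///     src_port: [u8; 2],
///     dst_port: [u8; 2],
/// }
///
/// #[derive(FromBytes, KnownLayout, Immutable)]
/// #[repr(C)]
/// struct Packet {
///     header: PacketHeader,
///     body: [u8],
/// }
///
/// // The trailing `body` slice absorbs however many bytes remain after the
/// // sized `header` prefix.
/// let bytes = &[0u8, 80, 0, 53, 0xaa, 0xbb, 0xcc][..];
/// let packet = Packet::ref_from_bytes(bytes).unwrap();
/// assert_eq!(packet.body, [0xaa, 0xbb, 0xcc]);
/// ```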
611///
612/// ## Slice DST layout
613///
614/// Just like other composite Rust types, the layout of a slice DST is not
615/// well-defined unless it is specified using an explicit `#[repr(...)]`
616/// attribute such as `#[repr(C)]`. [Other representations are
617/// supported][reprs], but in this section, we'll use `#[repr(C)]` as our
618/// example.
619///
620/// A `#[repr(C)]` slice DST is laid out [just like sized `#[repr(C)]`
/// types][repr-c-structs], but the presence of a variable-length field
622/// introduces the possibility of *dynamic padding*. In particular, it may be
623/// necessary to add trailing padding *after* the trailing slice field in order
624/// to satisfy the outer type's alignment, and the amount of padding required
625/// may be a function of the length of the trailing slice field. This is just a
626/// natural consequence of the normal `#[repr(C)]` rules applied to slice DSTs,
627/// but it can result in surprising behavior. For example, consider the
628/// following type:
629///
630/// ```
631/// #[repr(C)]
632/// struct Foo {
633///     a: u32,
634///     b: u8,
635///     z: [u16],
636/// }
637/// ```
638///
639/// Assuming that `u32` has alignment 4 (this is not true on all platforms),
640/// then `Foo` has alignment 4 as well. Here is the smallest possible value for
641/// `Foo`:
642///
643/// ```text
644/// byte offset | 01234567
645///       field | aaaab---
646///                    ><
647/// ```
648///
649/// In this value, `z` has length 0. Abiding by `#[repr(C)]`, the lowest offset
650/// that we can place `z` at is 5, but since `z` has alignment 2, we need to
651/// round up to offset 6. This means that there is one byte of padding between
652/// `b` and `z`, then 0 bytes of `z` itself (denoted `><` in this diagram), and
653/// then two bytes of padding after `z` in order to satisfy the overall
654/// alignment of `Foo`. The size of this instance is 8 bytes.
655///
656/// What about if `z` has length 1?
657///
658/// ```text
659/// byte offset | 01234567
660///       field | aaaab-zz
661/// ```
662///
663/// In this instance, `z` has length 1, and thus takes up 2 bytes. That means
664/// that we no longer need padding after `z` in order to satisfy `Foo`'s
665/// alignment. We've now seen two different values of `Foo` with two different
666/// lengths of `z`, but they both have the same size - 8 bytes.
667///
668/// What about if `z` has length 2?
669///
670/// ```text
671/// byte offset | 012345678901
672///       field | aaaab-zzzz--
673/// ```
674///
675/// Now `z` has length 2, and thus takes up 4 bytes. This brings our un-padded
676/// size to 10, and so we now need another 2 bytes of padding after `z` to
677/// satisfy `Foo`'s alignment.
678///
679/// Again, all of this is just a logical consequence of the `#[repr(C)]` rules
680/// applied to slice DSTs, but it can be surprising that the amount of trailing
681/// padding becomes a function of the trailing slice field's length, and thus
682/// can only be computed at runtime.
683///
684/// [reprs]: https://doc.rust-lang.org/reference/type-layout.html#representations
685/// [repr-c-structs]: https://doc.rust-lang.org/reference/type-layout.html#reprc-structs
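///
/// As a sketch of the computation just described (assuming, as above, that
/// `u32` has alignment 4), the size of `Foo` for a given trailing slice length
/// can be computed by hand:
///
/// ```
/// fn foo_size(elems: usize) -> usize {
///     let offset = 6;    // offset of `z`: 4 (`a`) + 1 (`b`) + 1 (padding)
///     let elem_size = 2; // size of each `u16` element of `z`
///     let align = 4;     // alignment of `Foo`
///     let unpadded = offset + elem_size * elems;
///     // Round up to the nearest multiple of `align`.
///     (unpadded + align - 1) / align * align
/// }
///
/// assert_eq!(foo_size(0), 8);
/// assert_eq!(foo_size(1), 8);
/// assert_eq!(foo_size(2), 12);
/// ```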
686///
687/// ## What is a valid size?
688///
689/// There are two places in zerocopy's API that we refer to "a valid size" of a
690/// type. In normal casts or conversions, where the source is a byte slice, we
691/// need to know whether the source byte slice is a valid size of the
692/// destination type. In prefix or suffix casts, we need to know whether *there
693/// exists* a valid size of the destination type which fits in the source byte
694/// slice and, if so, what the largest such size is.
695///
696/// As outlined above, a slice DST's size is defined by the number of elements
697/// in its trailing slice field. However, there is not necessarily a 1-to-1
698/// mapping between trailing slice field length and overall size. As we saw in
699/// the previous section with the type `Foo`, instances with both 0 and 1
700/// elements in the trailing `z` field result in a `Foo` whose size is 8 bytes.
701///
702/// When we say "x is a valid size of `T`", we mean one of two things:
703/// - If `T: Sized`, then we mean that `x == size_of::<T>()`
704/// - If `T` is a slice DST, then we mean that there exists a `len` such that the instance of
705///   `T` with `len` trailing slice elements has size `x`
706///
707/// When we say "largest possible size of `T` that fits in a byte slice", we
708/// mean one of two things:
709/// - If `T: Sized`, then we mean `size_of::<T>()` if the byte slice is at least
710///   `size_of::<T>()` bytes long
711/// - If `T` is a slice DST, then we mean to consider all values, `len`, such
712///   that the instance of `T` with `len` trailing slice elements fits in the
713///   byte slice, and to choose the largest such `len`, if any
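///
/// For example, a sketch of the second computation for the `Foo` type above:
/// the largest valid size of `Foo` that fits in an 11-byte slice is 8 bytes,
/// achieved by `len == 1` (a `len` of 2 would require 12 bytes). The helper
/// below is hypothetical and not part of zerocopy's API:
///
/// ```
/// // The largest `len` such that a `Foo` with `len` trailing elements fits
/// // in `bytes_available` bytes, if any such `len` exists.
/// fn largest_fitting_len(bytes_available: usize) -> Option<usize> {
///     let (offset, elem_size, align) = (6usize, 2, 4);
///     let size_for = |len: usize| (offset + elem_size * len + align - 1) / align * align;
///     (0..).take_while(|&len| size_for(len) <= bytes_available).last()
/// }
///
/// assert_eq!(largest_fitting_len(11), Some(1));
/// assert_eq!(largest_fitting_len(5), None);
/// ```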
714///
715///
716/// # Safety
717///
718/// This trait does not convey any safety guarantees to code outside this crate.
719///
720/// You must not rely on the `#[doc(hidden)]` internals of `KnownLayout`. Future
721/// releases of zerocopy may make backwards-breaking changes to these items,
722/// including changes that only affect soundness, which may cause code which
723/// uses those items to silently become unsound.
724///
725#[cfg_attr(feature = "derive", doc = "[derive]: zerocopy_derive::KnownLayout")]
726#[cfg_attr(
727    not(feature = "derive"),
728    doc = concat!("[derive]: https://docs.rs/zerocopy/", env!("CARGO_PKG_VERSION"), "/zerocopy/derive.KnownLayout.html"),
729)]
730#[cfg_attr(
731    zerocopy_diagnostic_on_unimplemented_1_78_0,
732    diagnostic::on_unimplemented(note = "Consider adding `#[derive(KnownLayout)]` to `{Self}`")
733)]
734pub unsafe trait KnownLayout {
735    // The `Self: Sized` bound makes it so that `KnownLayout` can still be
736    // object safe. It's not currently object safe thanks to `const LAYOUT`, and
737    // it likely won't be in the future, but there's no reason not to be
738    // forwards-compatible with object safety.
739    #[doc(hidden)]
740    fn only_derive_is_allowed_to_implement_this_trait()
741    where
742        Self: Sized;
743
744    /// The type of metadata stored in a pointer to `Self`.
745    ///
746    /// This is `()` for sized types and `usize` for slice DSTs.
747    type PointerMetadata: PointerMetadata;
748
749    /// A maybe-uninitialized analog of `Self`
750    ///
751    /// # Safety
752    ///
753    /// `Self::LAYOUT` and `Self::MaybeUninit::LAYOUT` are identical.
754    /// `Self::MaybeUninit` admits uninitialized bytes in all positions.
755    #[doc(hidden)]
756    type MaybeUninit: ?Sized + KnownLayout<PointerMetadata = Self::PointerMetadata>;
757
758    /// The layout of `Self`.
759    ///
760    /// # Safety
761    ///
762    /// Callers may assume that `LAYOUT` accurately reflects the layout of
763    /// `Self`. In particular:
764    /// - `LAYOUT.align` is equal to `Self`'s alignment
765    /// - If `Self: Sized`, then `LAYOUT.size_info == SizeInfo::Sized { size }`
766    ///   where `size == size_of::<Self>()`
767    /// - If `Self` is a slice DST, then `LAYOUT.size_info ==
768    ///   SizeInfo::SliceDst(slice_layout)` where:
769    ///   - The size, `size`, of an instance of `Self` with `elems` trailing
770    ///     slice elements is equal to `slice_layout.offset +
771    ///     slice_layout.elem_size * elems` rounded up to the nearest multiple
772    ///     of `LAYOUT.align`
773    ///   - For such an instance, any bytes in the range `[slice_layout.offset +
774    ///     slice_layout.elem_size * elems, size)` are padding and must not be
775    ///     assumed to be initialized
776    #[doc(hidden)]
777    const LAYOUT: DstLayout;
778
779    /// SAFETY: The returned pointer has the same address and provenance as
780    /// `bytes`. If `Self` is a DST, the returned pointer's referent has `elems`
781    /// elements in its trailing slice.
782    #[doc(hidden)]
783    fn raw_from_ptr_len(bytes: NonNull<u8>, meta: Self::PointerMetadata) -> NonNull<Self>;
784
785    /// Extracts the metadata from a pointer to `Self`.
786    ///
787    /// # Safety
788    ///
789    /// `pointer_to_metadata` always returns the correct metadata stored in
790    /// `ptr`.
791    #[doc(hidden)]
792    fn pointer_to_metadata(ptr: *mut Self) -> Self::PointerMetadata;
793
794    /// Computes the length of the byte range addressed by `ptr`.
795    ///
    /// Returns `None` if the resulting length would not fit in a `usize`.
797    ///
798    /// # Safety
799    ///
800    /// Callers may assume that `size_of_val_raw` always returns the correct
801    /// size.
802    ///
803    /// Callers may assume that, if `ptr` addresses a byte range whose length
    /// fits in a `usize`, this will return `Some`.
805    #[doc(hidden)]
806    #[must_use]
807    #[inline(always)]
808    fn size_of_val_raw(ptr: NonNull<Self>) -> Option<usize> {
809        let meta = Self::pointer_to_metadata(ptr.as_ptr());
810        // SAFETY: `size_for_metadata` promises to only return `None` if the
811        // resulting size would not fit in a `usize`.
812        meta.size_for_metadata(Self::LAYOUT)
813    }
814}
815
816/// Efficiently produces the [`TrailingSliceLayout`] of `T`.
817#[inline(always)]
818pub(crate) fn trailing_slice_layout<T>() -> TrailingSliceLayout
819where
820    T: ?Sized + KnownLayout<PointerMetadata = usize>,
821{
822    trait LayoutFacts {
823        const SIZE_INFO: TrailingSliceLayout;
824    }
825
826    impl<T: ?Sized> LayoutFacts for T
827    where
828        T: KnownLayout<PointerMetadata = usize>,
829    {
830        const SIZE_INFO: TrailingSliceLayout = match T::LAYOUT.size_info {
831            crate::SizeInfo::Sized { .. } => const_panic!("unreachable"),
832            crate::SizeInfo::SliceDst(info) => info,
833        };
834    }
835
836    T::SIZE_INFO
837}
838
839/// The metadata associated with a [`KnownLayout`] type.
840#[doc(hidden)]
841pub trait PointerMetadata: Copy + Eq + Debug {
842    /// Constructs a `Self` from an element count.
843    ///
844    /// If `Self = ()`, this returns `()`. If `Self = usize`, this returns
845    /// `elems`. No other types are currently supported.
846    fn from_elem_count(elems: usize) -> Self;
847
848    /// Computes the size of the object with the given layout and pointer
849    /// metadata.
850    ///
851    /// # Panics
852    ///
853    /// If `Self = ()`, `layout` must describe a sized type. If `Self = usize`,
854    /// `layout` must describe a slice DST. Otherwise, `size_for_metadata` may
855    /// panic.
856    ///
857    /// # Safety
858    ///
859    /// `size_for_metadata` promises to only return `None` if the resulting size
860    /// would not fit in a `usize`.
861    fn size_for_metadata(&self, layout: DstLayout) -> Option<usize>;
862}
863
864impl PointerMetadata for () {
865    #[inline]
866    #[allow(clippy::unused_unit)]
867    fn from_elem_count(_elems: usize) -> () {}
868
869    #[inline]
870    fn size_for_metadata(&self, layout: DstLayout) -> Option<usize> {
871        match layout.size_info {
872            SizeInfo::Sized { size } => Some(size),
873            // NOTE: This branch is unreachable, but we return `None` rather
874            // than `unreachable!()` to avoid generating panic paths.
875            SizeInfo::SliceDst(_) => None,
876        }
877    }
878}
879
880impl PointerMetadata for usize {
881    #[inline]
882    fn from_elem_count(elems: usize) -> usize {
883        elems
884    }
885
886    #[inline]
887    fn size_for_metadata(&self, layout: DstLayout) -> Option<usize> {
888        match layout.size_info {
889            SizeInfo::SliceDst(TrailingSliceLayout { offset, elem_size }) => {
890                let slice_len = elem_size.checked_mul(*self)?;
891                let without_padding = offset.checked_add(slice_len)?;
892                without_padding.checked_add(util::padding_needed_for(without_padding, layout.align))
893            }
894            // NOTE: This branch is unreachable, but we return `None` rather
895            // than `unreachable!()` to avoid generating panic paths.
896            SizeInfo::Sized { .. } => None,
897        }
898    }
899}
900
901// SAFETY: Delegates safety to `DstLayout::for_slice`.
902unsafe impl<T> KnownLayout for [T] {
903    #[allow(clippy::missing_inline_in_public_items, dead_code)]
904    #[cfg_attr(
905        all(coverage_nightly, __ZEROCOPY_INTERNAL_USE_ONLY_NIGHTLY_FEATURES_IN_TESTS),
906        coverage(off)
907    )]
908    fn only_derive_is_allowed_to_implement_this_trait()
909    where
910        Self: Sized,
911    {
912    }
913
914    type PointerMetadata = usize;
915
916    // SAFETY: `CoreMaybeUninit<T>::LAYOUT` and `T::LAYOUT` are identical
917    // because `CoreMaybeUninit<T>` has the same size and alignment as `T` [1].
918    // Consequently, `[CoreMaybeUninit<T>]::LAYOUT` and `[T]::LAYOUT` are
919    // identical, because they both lack a fixed-sized prefix and because they
920    // inherit the alignments of their inner element type (which are identical)
921    // [2][3].
922    //
923    // `[CoreMaybeUninit<T>]` admits uninitialized bytes at all positions
924    // because `CoreMaybeUninit<T>` admits uninitialized bytes at all positions
925    // and because the inner elements of `[CoreMaybeUninit<T>]` are laid out
926    // back-to-back [2][3].
927    //
928    // [1] Per https://doc.rust-lang.org/1.81.0/std/mem/union.MaybeUninit.html#layout-1:
929    //
930    //   `MaybeUninit<T>` is guaranteed to have the same size, alignment, and ABI as
931    //   `T`
932    //
933    // [2] Per https://doc.rust-lang.org/1.82.0/reference/type-layout.html#slice-layout:
934    //
935    //   Slices have the same layout as the section of the array they slice.
936    //
937    // [3] Per https://doc.rust-lang.org/1.82.0/reference/type-layout.html#array-layout:
938    //
939    //   An array of `[T; N]` has a size of `size_of::<T>() * N` and the same
940    //   alignment of `T`. Arrays are laid out so that the zero-based `nth`
941    //   element of the array is offset from the start of the array by `n *
942    //   size_of::<T>()` bytes.
943    type MaybeUninit = [CoreMaybeUninit<T>];
944
945    const LAYOUT: DstLayout = DstLayout::for_slice::<T>();
946
947    // SAFETY: `.cast` preserves address and provenance. The returned pointer
948    // refers to an object with `elems` elements by construction.
949    #[inline(always)]
950    fn raw_from_ptr_len(data: NonNull<u8>, elems: usize) -> NonNull<Self> {
951        // TODO(#67): Remove this allow. See NonNullExt for more details.
952        #[allow(unstable_name_collisions)]
953        NonNull::slice_from_raw_parts(data.cast::<T>(), elems)
954    }
955
956    #[inline(always)]
957    fn pointer_to_metadata(ptr: *mut [T]) -> usize {
958        #[allow(clippy::as_conversions)]
959        let slc = ptr as *const [()];
960
961        // SAFETY:
962        // - `()` has alignment 1, so `slc` is trivially aligned.
963        // - `slc` was derived from a non-null pointer.
964        // - The size is 0 regardless of the length, so it is sound to
965        //   materialize a reference regardless of location.
966        // - By invariant, `self.ptr` has valid provenance.
967        let slc = unsafe { &*slc };
968
969        // This is correct because the preceding `as` cast preserves the number
970        // of slice elements. [1]
971        //
972        // [1] Per https://doc.rust-lang.org/reference/expressions/operator-expr.html#pointer-to-pointer-cast:
973        //
974        //   For slice types like `[T]` and `[U]`, the raw pointer types `*const
975        //   [T]`, `*mut [T]`, `*const [U]`, and `*mut [U]` encode the number of
976        //   elements in this slice. Casts between these raw pointer types
977        //   preserve the number of elements. ... The same holds for `str` and
978        //   any compound type whose unsized tail is a slice type, such as
979        //   struct `Foo(i32, [u8])` or `(u64, Foo)`.
980        slc.len()
981    }
982}
983
984#[rustfmt::skip]
985impl_known_layout!(
986    (),
987    u8, i8, u16, i16, u32, i32, u64, i64, u128, i128, usize, isize, f32, f64,
988    bool, char,
989    NonZeroU8, NonZeroI8, NonZeroU16, NonZeroI16, NonZeroU32, NonZeroI32,
990    NonZeroU64, NonZeroI64, NonZeroU128, NonZeroI128, NonZeroUsize, NonZeroIsize
991);
992#[rustfmt::skip]
993#[cfg(feature = "float-nightly")]
994impl_known_layout!(
995    #[cfg_attr(doc_cfg, doc(cfg(feature = "float-nightly")))]
996    f16,
997    #[cfg_attr(doc_cfg, doc(cfg(feature = "float-nightly")))]
998    f128
999);
1000#[rustfmt::skip]
1001impl_known_layout!(
1002    T         => Option<T>,
1003    T: ?Sized => PhantomData<T>,
1004    T         => Wrapping<T>,
1005    T         => CoreMaybeUninit<T>,
1006    T: ?Sized => *const T,
1007    T: ?Sized => *mut T,
1008    T: ?Sized => &'_ T,
1009    T: ?Sized => &'_ mut T,
1010);
1011impl_known_layout!(const N: usize, T => [T; N]);
1012
1013// SAFETY: `str` has the same representation as `[u8]`. `ManuallyDrop<T>` [1],
1014// `UnsafeCell<T>` [2], and `Cell<T>` [3] have the same representation as `T`.
1015//
1016// [1] Per https://doc.rust-lang.org/1.85.0/std/mem/struct.ManuallyDrop.html:
1017//
1018//   `ManuallyDrop<T>` is guaranteed to have the same layout and bit validity as
1019//   `T`
1020//
1021// [2] Per https://doc.rust-lang.org/1.85.0/core/cell/struct.UnsafeCell.html#memory-layout:
1022//
1023//   `UnsafeCell<T>` has the same in-memory representation as its inner type
1024//   `T`.
1025//
1026// [3] Per https://doc.rust-lang.org/1.85.0/core/cell/struct.Cell.html#memory-layout:
1027//
1028//   `Cell<T>` has the same in-memory representation as `T`.
1029const _: () = unsafe {
1030    unsafe_impl_known_layout!(
1031        #[repr([u8])]
1032        str
1033    );
1034    unsafe_impl_known_layout!(T: ?Sized + KnownLayout => #[repr(T)] ManuallyDrop<T>);
1035    unsafe_impl_known_layout!(T: ?Sized + KnownLayout => #[repr(T)] UnsafeCell<T>);
1036    unsafe_impl_known_layout!(T: ?Sized + KnownLayout => #[repr(T)] Cell<T>);
1037};
1038
1039// SAFETY:
1040// - By consequence of the invariant on `T::MaybeUninit` that `T::LAYOUT` and
1041//   `T::MaybeUninit::LAYOUT` are equal, `T` and `T::MaybeUninit` have the same:
1042//   - Fixed prefix size
1043//   - Alignment
1044//   - (For DSTs) trailing slice element size
// - By consequence of the above, referents `T::MaybeUninit` and `T` require
//   the same kind of pointer metadata, and thus it is valid to perform an `as`
//   cast from `*mut T` to `*mut T::MaybeUninit`, and this operation
1048//   preserves referent size (ie, `size_of_val_raw`).
1049const _: () = unsafe {
1050    unsafe_impl_known_layout!(T: ?Sized + KnownLayout => #[repr(T::MaybeUninit)] MaybeUninit<T>)
1051};
1052
1053/// Analyzes whether a type is [`FromZeros`].
1054///
1055/// This derive analyzes, at compile time, whether the annotated type satisfies
1056/// the [safety conditions] of `FromZeros` and implements `FromZeros` and its
1057/// supertraits if it is sound to do so. This derive can be applied to structs,
1058/// enums, and unions; e.g.:
1059///
1060/// ```
1061/// # use zerocopy_derive::{FromZeros, Immutable};
1062/// #[derive(FromZeros)]
1063/// struct MyStruct {
1064/// # /*
1065///     ...
1066/// # */
1067/// }
1068///
1069/// #[derive(FromZeros)]
1070/// #[repr(u8)]
1071/// enum MyEnum {
1072/// #   Variant0,
1073/// # /*
1074///     ...
1075/// # */
1076/// }
1077///
1078/// #[derive(FromZeros, Immutable)]
1079/// union MyUnion {
1080/// #   variant: u8,
1081/// # /*
1082///     ...
1083/// # */
1084/// }
1085/// ```
1086///
1087/// [safety conditions]: trait@FromZeros#safety
1088///
1089/// # Analysis
1090///
1091/// *This section describes, roughly, the analysis performed by this derive to
1092/// determine whether it is sound to implement `FromZeros` for a given type.
1093/// Unless you are modifying the implementation of this derive, or attempting to
1094/// manually implement `FromZeros` for a type yourself, you don't need to read
1095/// this section.*
1096///
1097/// If a type has the following properties, then this derive can implement
1098/// `FromZeros` for that type:
1099///
1100/// - If the type is a struct, all of its fields must be `FromZeros`.
1101/// - If the type is an enum:
1102///   - It must have a defined representation (`repr`s `C`, `u8`, `u16`, `u32`,
1103///     `u64`, `usize`, `i8`, `i16`, `i32`, `i64`, or `isize`).
///   - It must have a variant with a discriminant/tag of `0`. See [the
///     reference] for a description of how discriminant values are specified.
///   - The fields of that variant must be `FromZeros`.
1108///
1109/// This analysis is subject to change. Unsafe code may *only* rely on the
1110/// documented [safety conditions] of `FromZeros`, and must *not* rely on the
1111/// implementation details of this derive.
1112///
1113/// [the reference]: https://doc.rust-lang.org/reference/items/enumerations.html#custom-discriminant-values-for-fieldless-enumerations
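///
/// For example, a sketch of an enum that satisfies these requirements
/// (assuming the `derive` feature; `new_zeroed` is provided by `FromZeros`):
///
/// ```
/// use zerocopy::*;
/// # use zerocopy_derive::*;
///
/// #[derive(FromZeros)]
/// #[repr(u8)]
/// enum Command {
///     Nop = 0, // the all-zeros bit pattern corresponds to this variant
///     Reset = 1,
/// }
///
/// let cmd = Command::new_zeroed();
/// assert!(matches!(cmd, Command::Nop));
/// ```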
1114///
1115/// ## Why isn't an explicit representation required for structs?
1116///
1117/// Neither this derive, nor the [safety conditions] of `FromZeros`, requires
1118/// that structs are marked with `#[repr(C)]`.
1119///
/// Per the [Rust reference][reference],
1121///
1122/// > The representation of a type can change the padding between fields, but
1123/// > does not change the layout of the fields themselves.
1124///
1125/// [reference]: https://doc.rust-lang.org/reference/type-layout.html#representations
1126///
1127/// Since the layout of structs only consists of padding bytes and field bytes,
1128/// a struct is soundly `FromZeros` if:
1129/// 1. its padding is soundly `FromZeros`, and
1130/// 2. its fields are soundly `FromZeros`.
1131///
1132/// The answer to the first question is always yes: padding bytes do not have
1133/// any validity constraints. A [discussion] of this question in the Unsafe Code
1134/// Guidelines Working Group concluded that it would be virtually unimaginable
1135/// for future versions of rustc to add validity constraints to padding bytes.
1136///
1137/// [discussion]: https://github.com/rust-lang/unsafe-code-guidelines/issues/174
1138///
1139/// Whether a struct is soundly `FromZeros` therefore solely depends on whether
1140/// its fields are `FromZeros`.
1141// TODO(#146): Document why we don't require an enum to have an explicit `repr`
1142// attribute.
1143#[cfg(any(feature = "derive", test))]
1144#[cfg_attr(doc_cfg, doc(cfg(feature = "derive")))]
1145pub use zerocopy_derive::FromZeros;
1146
1147/// Analyzes whether a type is [`Immutable`].
1148///
1149/// This derive analyzes, at compile time, whether the annotated type satisfies
1150/// the [safety conditions] of `Immutable` and implements `Immutable` if it is
1151/// sound to do so. This derive can be applied to structs, enums, and unions;
1152/// e.g.:
1153///
1154/// ```
1155/// # use zerocopy_derive::Immutable;
1156/// #[derive(Immutable)]
1157/// struct MyStruct {
1158/// # /*
1159///     ...
1160/// # */
1161/// }
1162///
1163/// #[derive(Immutable)]
1164/// enum MyEnum {
1165/// #   Variant0,
1166/// # /*
1167///     ...
1168/// # */
1169/// }
1170///
1171/// #[derive(Immutable)]
1172/// union MyUnion {
1173/// #   variant: u8,
1174/// # /*
1175///     ...
1176/// # */
1177/// }
1178/// ```
1179///
1180/// # Analysis
1181///
1182/// *This section describes, roughly, the analysis performed by this derive to
1183/// determine whether it is sound to implement `Immutable` for a given type.
1184/// Unless you are modifying the implementation of this derive, you don't need
1185/// to read this section.*
1186///
1187/// If a type has the following properties, then this derive can implement
1188/// `Immutable` for that type:
1189///
1190/// - All fields must be `Immutable`.
1191///
1192/// This analysis is subject to change. Unsafe code may *only* rely on the
1193/// documented [safety conditions] of `Immutable`, and must *not* rely on the
1194/// implementation details of this derive.
1195///
1196/// [safety conditions]: trait@Immutable#safety
1197#[cfg(any(feature = "derive", test))]
1198#[cfg_attr(doc_cfg, doc(cfg(feature = "derive")))]
1199pub use zerocopy_derive::Immutable;
1200
1201/// Types which are free from interior mutability.
1202///
1203/// `T: Immutable` indicates that `T` does not permit interior mutation, except
1204/// by ownership or an exclusive (`&mut`) borrow.
1205///
1206/// # Implementation
1207///
1208/// **Do not implement this trait yourself!** Instead, use
1209/// [`#[derive(Immutable)]`][derive] (requires the `derive` Cargo feature);
1210/// e.g.:
1211///
1212/// ```
1213/// # use zerocopy_derive::Immutable;
1214/// #[derive(Immutable)]
1215/// struct MyStruct {
1216/// # /*
1217///     ...
1218/// # */
1219/// }
1220///
1221/// #[derive(Immutable)]
1222/// enum MyEnum {
1223/// # /*
1224///     ...
1225/// # */
1226/// }
1227///
1228/// #[derive(Immutable)]
1229/// union MyUnion {
1230/// #   variant: u8,
1231/// # /*
1232///     ...
1233/// # */
1234/// }
1235/// ```
1236///
1237/// This derive performs a sophisticated, compile-time safety analysis to
1238/// determine whether a type is `Immutable`.
1239///
1240/// # Safety
1241///
1242/// Unsafe code outside of this crate must not make any assumptions about `T`
1243/// based on `T: Immutable`. We reserve the right to relax the requirements for
1244/// `Immutable` in the future, and if unsafe code outside of this crate makes
1245/// assumptions based on `T: Immutable`, future relaxations may cause that code
1246/// to become unsound.
1247///
1248// # Safety (Internal)
1249//
1250// If `T: Immutable`, unsafe code *inside of this crate* may assume that, given
1251// `t: &T`, `t` does not contain any [`UnsafeCell`]s at any byte location
1252// within the byte range addressed by `t`. This includes ranges of length 0
1253// (e.g., `UnsafeCell<()>` and `[UnsafeCell<u8>; 0]`). If a type implements
// `Immutable` which violates this assumption, it may cause this crate to
1255// exhibit [undefined behavior].
1256//
1257// [`UnsafeCell`]: core::cell::UnsafeCell
1258// [undefined behavior]: https://raphlinus.github.io/programming/rust/2018/08/17/undefined-behavior.html
1259#[cfg_attr(
1260    feature = "derive",
1261    doc = "[derive]: zerocopy_derive::Immutable",
1262    doc = "[derive-analysis]: zerocopy_derive::Immutable#analysis"
1263)]
1264#[cfg_attr(
1265    not(feature = "derive"),
1266    doc = concat!("[derive]: https://docs.rs/zerocopy/", env!("CARGO_PKG_VERSION"), "/zerocopy/derive.Immutable.html"),
1267    doc = concat!("[derive-analysis]: https://docs.rs/zerocopy/", env!("CARGO_PKG_VERSION"), "/zerocopy/derive.Immutable.html#analysis"),
1268)]
1269#[cfg_attr(
1270    zerocopy_diagnostic_on_unimplemented_1_78_0,
1271    diagnostic::on_unimplemented(note = "Consider adding `#[derive(Immutable)]` to `{Self}`")
1272)]
1273pub unsafe trait Immutable {
1274    // The `Self: Sized` bound makes it so that `Immutable` is still object
1275    // safe.
1276    #[doc(hidden)]
1277    fn only_derive_is_allowed_to_implement_this_trait()
1278    where
1279        Self: Sized;
1280}
1281
1282/// Implements [`TryFromBytes`].
1283///
1284/// This derive synthesizes the runtime checks required to check whether a
1285/// sequence of initialized bytes corresponds to a valid instance of a type.
1286/// This derive can be applied to structs, enums, and unions; e.g.:
1287///
1288/// ```
1289/// # use zerocopy_derive::{TryFromBytes, Immutable};
1290/// #[derive(TryFromBytes)]
1291/// struct MyStruct {
1292/// # /*
1293///     ...
1294/// # */
1295/// }
1296///
1297/// #[derive(TryFromBytes)]
1298/// #[repr(u8)]
1299/// enum MyEnum {
1300/// #   V00,
1301/// # /*
1302///     ...
1303/// # */
1304/// }
1305///
1306/// #[derive(TryFromBytes, Immutable)]
1307/// union MyUnion {
1308/// #   variant: u8,
1309/// # /*
1310///     ...
1311/// # */
1312/// }
1313/// ```
1314///
1315/// # Portability
1316///
1317/// To ensure consistent endianness for enums with multi-byte representations,
1318/// explicitly specify and convert each discriminant using `.to_le()` or
1319/// `.to_be()`; e.g.:
1320///
1321/// ```
1322/// # use zerocopy_derive::TryFromBytes;
1323/// // `DataStoreVersion` is encoded in little-endian.
1324/// #[derive(TryFromBytes)]
1325/// #[repr(u32)]
1326/// pub enum DataStoreVersion {
1327///     /// Version 1 of the data store.
1328///     V1 = 9u32.to_le(),
1329///
1330///     /// Version 2 of the data store.
1331///     V2 = 10u32.to_le(),
1332/// }
1333/// ```
1334///
1335/// [safety conditions]: trait@TryFromBytes#safety
1336#[cfg(any(feature = "derive", test))]
1337#[cfg_attr(doc_cfg, doc(cfg(feature = "derive")))]
1338pub use zerocopy_derive::TryFromBytes;
1339
1340/// Types for which some bit patterns are valid.
1341///
1342/// A memory region of the appropriate length which contains initialized bytes
1343/// can be viewed as a `TryFromBytes` type so long as the runtime value of those
1344/// bytes corresponds to a [*valid instance*] of that type. For example,
1345/// [`bool`] is `TryFromBytes`, so zerocopy can transmute a [`u8`] into a
1346/// [`bool`] so long as it first checks that the value of the [`u8`] is `0` or
1347/// `1`.
1348///
1349/// # Implementation
1350///
1351/// **Do not implement this trait yourself!** Instead, use
1352/// [`#[derive(TryFromBytes)]`][derive]; e.g.:
1353///
1354/// ```
1355/// # use zerocopy_derive::{TryFromBytes, Immutable};
1356/// #[derive(TryFromBytes)]
1357/// struct MyStruct {
1358/// # /*
1359///     ...
1360/// # */
1361/// }
1362///
1363/// #[derive(TryFromBytes)]
1364/// #[repr(u8)]
1365/// enum MyEnum {
1366/// #   V00,
1367/// # /*
1368///     ...
1369/// # */
1370/// }
1371///
1372/// #[derive(TryFromBytes, Immutable)]
1373/// union MyUnion {
1374/// #   variant: u8,
1375/// # /*
1376///     ...
1377/// # */
1378/// }
1379/// ```
1380///
1381/// This derive ensures that the runtime check of whether bytes correspond to a
1382/// valid instance is sound. You **must** implement this trait via the derive.
1383///
1384/// # What is a "valid instance"?
1385///
1386/// In Rust, each type has *bit validity*, which refers to the set of bit
1387/// patterns which may appear in an instance of that type. It is impossible for
1388/// safe Rust code to produce values which violate bit validity (ie, values
1389/// outside of the "valid" set of bit patterns). If `unsafe` code produces an
1390/// invalid value, this is considered [undefined behavior].
1391///
1392/// Rust's bit validity rules are currently being decided, which means that some
1393/// types have three classes of bit patterns: those which are definitely valid,
1394/// and whose validity is documented in the language; those which may or may not
1395/// be considered valid at some point in the future; and those which are
1396/// definitely invalid.
1397///
1398/// Zerocopy takes a conservative approach, and only considers a bit pattern to
/// be valid if its validity is a documented guarantee provided by the
1400/// language.
1401///
1402/// For most use cases, Rust's current guarantees align with programmers'
1403/// intuitions about what ought to be valid. As a result, zerocopy's
1404/// conservatism should not affect most users.
1405///
1406/// If you are negatively affected by lack of support for a particular type,
1407/// we encourage you to let us know by [filing an issue][github-repo].
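///
/// For example, a sketch of the `bool` case described above, using
/// [`try_ref_from_bytes`]:
///
/// ```
/// use zerocopy::TryFromBytes;
///
/// // `1` is a valid `bool`...
/// assert!(bool::try_ref_from_bytes(&[1u8][..]).is_ok());
/// // ...but `3` is not.
/// assert!(bool::try_ref_from_bytes(&[3u8][..]).is_err());
/// ```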
1408///
1409/// # `TryFromBytes` is not symmetrical with [`IntoBytes`]
1410///
1411/// There are some types which implement both `TryFromBytes` and [`IntoBytes`],
1412/// but for which `TryFromBytes` is not guaranteed to accept all byte sequences
1413/// produced by `IntoBytes`. In other words, for some `T: TryFromBytes +
1414/// IntoBytes`, there exist values of `t: T` such that
/// `TryFromBytes::try_ref_from_bytes(t.as_bytes()).is_err()`. Code should not
1416/// generally assume that values produced by `IntoBytes` will necessarily be
1417/// accepted as valid by `TryFromBytes`.
1418///
1419/// # Safety
1420///
1421/// On its own, `T: TryFromBytes` does not make any guarantees about the layout
1422/// or representation of `T`. It merely provides the ability to perform a
1423/// validity check at runtime via methods like [`try_ref_from_bytes`].
1424///
1425/// You must not rely on the `#[doc(hidden)]` internals of `TryFromBytes`.
1426/// Future releases of zerocopy may make backwards-breaking changes to these
1427/// items, including changes that only affect soundness, which may cause code
1428/// which uses those items to silently become unsound.
1429///
1430/// [undefined behavior]: https://raphlinus.github.io/programming/rust/2018/08/17/undefined-behavior.html
1431/// [github-repo]: https://github.com/google/zerocopy
1432/// [`try_ref_from_bytes`]: TryFromBytes::try_ref_from_bytes
1433/// [*valid instance*]: #what-is-a-valid-instance
1434#[cfg_attr(feature = "derive", doc = "[derive]: zerocopy_derive::TryFromBytes")]
1435#[cfg_attr(
1436    not(feature = "derive"),
1437    doc = concat!("[derive]: https://docs.rs/zerocopy/", env!("CARGO_PKG_VERSION"), "/zerocopy/derive.TryFromBytes.html"),
1438)]
1439#[cfg_attr(
1440    zerocopy_diagnostic_on_unimplemented_1_78_0,
1441    diagnostic::on_unimplemented(note = "Consider adding `#[derive(TryFromBytes)]` to `{Self}`")
1442)]
1443pub unsafe trait TryFromBytes {
1444    // The `Self: Sized` bound makes it so that `TryFromBytes` is still object
1445    // safe.
1446    #[doc(hidden)]
1447    fn only_derive_is_allowed_to_implement_this_trait()
1448    where
1449        Self: Sized;
1450
1451    /// Does a given memory range contain a valid instance of `Self`?
1452    ///
1453    /// # Safety
1454    ///
1455    /// Unsafe code may assume that, if `is_bit_valid(candidate)` returns true,
1456    /// `*candidate` contains a valid `Self`.
1457    ///
1458    /// # Panics
1459    ///
1460    /// `is_bit_valid` may panic. Callers are responsible for ensuring that any
1461    /// `unsafe` code remains sound even in the face of `is_bit_valid`
1462    /// panicking. (We support user-defined validation routines; because those
1463    /// routines are safe code, there is no way to ensure that they do not
1464    /// panic.)
1465    ///
1466    /// Besides user-defined validation routines panicking, `is_bit_valid` will
1467    /// either panic or fail to compile if called on a pointer with [`Shared`]
1468    /// aliasing when `Self: !Immutable`.
1469    ///
1470    /// [`UnsafeCell`]: core::cell::UnsafeCell
1471    /// [`Shared`]: invariant::Shared
1472    #[doc(hidden)]
1473    fn is_bit_valid<A: invariant::Reference>(candidate: Maybe<'_, Self, A>) -> bool;
1474
1475    /// Attempts to interpret the given `source` as a `&Self`.
1476    ///
1477    /// If the bytes of `source` are a valid instance of `Self`, this method
1478    /// returns a reference to those bytes interpreted as a `Self`. If the
1479    /// length of `source` is not a [valid size of `Self`][valid-size], or if
1480    /// `source` is not appropriately aligned, or if `source` is not a valid
1481    /// instance of `Self`, this returns `Err`. If [`Self:
1482    /// Unaligned`][self-unaligned], you can [infallibly discard the alignment
1483    /// error][ConvertError::from].
1484    ///
1485    /// `Self` may be a sized type, a slice, or a [slice DST][slice-dst].
1486    ///
1487    /// [valid-size]: crate::KnownLayout#what-is-a-valid-size
1488    /// [self-unaligned]: Unaligned
1489    /// [slice-dst]: KnownLayout#dynamically-sized-types
1490    ///
1491    /// # Compile-Time Assertions
1492    ///
1493    /// This method cannot yet be used on unsized types whose dynamically-sized
1494    /// component is zero-sized. Attempting to use this method on such types
1495    /// results in a compile-time assertion error; e.g.:
1496    ///
1497    /// ```compile_fail,E0080
1498    /// use zerocopy::*;
1499    /// # use zerocopy_derive::*;
1500    ///
1501    /// #[derive(TryFromBytes, Immutable, KnownLayout)]
1502    /// #[repr(C)]
1503    /// struct ZSTy {
1504    ///     leading_sized: u16,
1505    ///     trailing_dst: [()],
1506    /// }
1507    ///
1508    /// let _ = ZSTy::try_ref_from_bytes(0u16.as_bytes()); // ⚠ Compile Error!
1509    /// ```
1510    ///
1511    /// # Examples
1512    ///
1513    /// ```
1514    /// use zerocopy::TryFromBytes;
1515    /// # use zerocopy_derive::*;
1516    ///
1517    /// // The only valid value of this type is the byte `0xC0`
1518    /// #[derive(TryFromBytes, KnownLayout, Immutable)]
1519    /// #[repr(u8)]
1520    /// enum C0 { xC0 = 0xC0 }
1521    ///
1522    /// // The only valid value of this type is the byte sequence `0xC0C0`.
1523    /// #[derive(TryFromBytes, KnownLayout, Immutable)]
1524    /// #[repr(C)]
1525    /// struct C0C0(C0, C0);
1526    ///
1527    /// #[derive(TryFromBytes, KnownLayout, Immutable)]
1528    /// #[repr(C)]
1529    /// struct Packet {
1530    ///     magic_number: C0C0,
1531    ///     mug_size: u8,
1532    ///     temperature: u8,
1533    ///     marshmallows: [[u8; 2]],
1534    /// }
1535    ///
1536    /// let bytes = &[0xC0, 0xC0, 240, 77, 0, 1, 2, 3, 4, 5][..];
1537    ///
1538    /// let packet = Packet::try_ref_from_bytes(bytes).unwrap();
1539    ///
1540    /// assert_eq!(packet.mug_size, 240);
1541    /// assert_eq!(packet.temperature, 77);
1542    /// assert_eq!(packet.marshmallows, [[0, 1], [2, 3], [4, 5]]);
1543    ///
1544    /// // These bytes are not a valid instance of `Packet`.
1545    /// let bytes = &[0x10, 0xC0, 240, 77, 0, 1, 2, 3, 4, 5][..];
1546    /// assert!(Packet::try_ref_from_bytes(bytes).is_err());
1547    /// ```
1548    #[must_use = "has no side effects"]
1549    #[inline]
1550    fn try_ref_from_bytes(source: &[u8]) -> Result<&Self, TryCastError<&[u8], Self>>
1551    where
1552        Self: KnownLayout + Immutable,
1553    {
1554        static_assert_dst_is_not_zst!(Self);
1555        match Ptr::from_ref(source).try_cast_into_no_leftover::<Self, BecauseImmutable>(None) {
1556            Ok(source) => {
1557                // This call may panic. If that happens, it doesn't cause any soundness
1558                // issues, as we have not generated any invalid state which we need to
1559                // fix before returning.
1560                //
1561                // Note that one panic or post-monomorphization error condition is
1562                // calling `try_into_valid` (and thus `is_bit_valid`) with a shared
1563                // pointer when `Self: !Immutable`. Since `Self: Immutable`, this panic
1564                // condition will not happen.
1565                match source.try_into_valid() {
1566                    Ok(valid) => Ok(valid.as_ref()),
1567                    Err(e) => {
1568                        Err(e.map_src(|src| src.as_bytes::<BecauseImmutable>().as_ref()).into())
1569                    }
1570                }
1571            }
1572            Err(e) => Err(e.map_src(Ptr::as_ref).into()),
1573        }
1574    }
1575
1576    /// Attempts to interpret the prefix of the given `source` as a `&Self`.
1577    ///
1578    /// This method computes the [largest possible size of `Self`][valid-size]
1579    /// that can fit in the leading bytes of `source`. If that prefix is a valid
1580    /// instance of `Self`, this method returns a reference to those bytes
1581    /// interpreted as `Self`, and a reference to the remaining bytes. If there
1582    /// are insufficient bytes, or if `source` is not appropriately aligned, or
1583    /// if those bytes are not a valid instance of `Self`, this returns `Err`.
1584    /// If [`Self: Unaligned`][self-unaligned], you can [infallibly discard the
1585    /// alignment error][ConvertError::from].
1586    ///
1587    /// `Self` may be a sized type, a slice, or a [slice DST][slice-dst].
1588    ///
1589    /// [valid-size]: crate::KnownLayout#what-is-a-valid-size
1590    /// [self-unaligned]: Unaligned
1591    /// [slice-dst]: KnownLayout#dynamically-sized-types
1592    ///
1593    /// # Compile-Time Assertions
1594    ///
1595    /// This method cannot yet be used on unsized types whose dynamically-sized
1596    /// component is zero-sized. Attempting to use this method on such types
1597    /// results in a compile-time assertion error; e.g.:
1598    ///
1599    /// ```compile_fail,E0080
1600    /// use zerocopy::*;
1601    /// # use zerocopy_derive::*;
1602    ///
1603    /// #[derive(TryFromBytes, Immutable, KnownLayout)]
1604    /// #[repr(C)]
1605    /// struct ZSTy {
1606    ///     leading_sized: u16,
1607    ///     trailing_dst: [()],
1608    /// }
1609    ///
1610    /// let _ = ZSTy::try_ref_from_prefix(0u16.as_bytes()); // ⚠ Compile Error!
1611    /// ```
1612    ///
1613    /// # Examples
1614    ///
1615    /// ```
1616    /// use zerocopy::TryFromBytes;
1617    /// # use zerocopy_derive::*;
1618    ///
1619    /// // The only valid value of this type is the byte `0xC0`
1620    /// #[derive(TryFromBytes, KnownLayout, Immutable)]
1621    /// #[repr(u8)]
1622    /// enum C0 { xC0 = 0xC0 }
1623    ///
1624    /// // The only valid value of this type is the bytes `0xC0C0`.
1625    /// #[derive(TryFromBytes, KnownLayout, Immutable)]
1626    /// #[repr(C)]
1627    /// struct C0C0(C0, C0);
1628    ///
1629    /// #[derive(TryFromBytes, KnownLayout, Immutable)]
1630    /// #[repr(C)]
1631    /// struct Packet {
1632    ///     magic_number: C0C0,
1633    ///     mug_size: u8,
1634    ///     temperature: u8,
1635    ///     marshmallows: [[u8; 2]],
1636    /// }
1637    ///
1638    /// // These are more bytes than are needed to encode a `Packet`.
1639    /// let bytes = &[0xC0, 0xC0, 240, 77, 0, 1, 2, 3, 4, 5, 6][..];
1640    ///
1641    /// let (packet, suffix) = Packet::try_ref_from_prefix(bytes).unwrap();
1642    ///
1643    /// assert_eq!(packet.mug_size, 240);
1644    /// assert_eq!(packet.temperature, 77);
1645    /// assert_eq!(packet.marshmallows, [[0, 1], [2, 3], [4, 5]]);
1646    /// assert_eq!(suffix, &[6u8][..]);
1647    ///
1648    /// // These bytes are not a valid instance of `Packet`.
1649    /// let bytes = &[0x10, 0xC0, 240, 77, 0, 1, 2, 3, 4, 5, 6][..];
1650    /// assert!(Packet::try_ref_from_prefix(bytes).is_err());
1651    /// ```
1652    #[must_use = "has no side effects"]
1653    #[inline]
1654    fn try_ref_from_prefix(source: &[u8]) -> Result<(&Self, &[u8]), TryCastError<&[u8], Self>>
1655    where
1656        Self: KnownLayout + Immutable,
1657    {
1658        static_assert_dst_is_not_zst!(Self);
1659        try_ref_from_prefix_suffix(source, CastType::Prefix, None)
1660    }
1661
1662    /// Attempts to interpret the suffix of the given `source` as a `&Self`.
1663    ///
1664    /// This method computes the [largest possible size of `Self`][valid-size]
1665    /// that can fit in the trailing bytes of `source`. If that suffix is a
1666    /// valid instance of `Self`, this method returns a reference to those bytes
1667    /// interpreted as `Self`, and a reference to the preceding bytes. If there
1668    /// are insufficient bytes, or if the suffix of `source` would not be
1669    /// appropriately aligned, or if the suffix is not a valid instance of
1670    /// `Self`, this returns `Err`. If [`Self: Unaligned`][self-unaligned], you
1671    /// can [infallibly discard the alignment error][ConvertError::from].
1672    ///
1673    /// `Self` may be a sized type, a slice, or a [slice DST][slice-dst].
1674    ///
1675    /// [valid-size]: crate::KnownLayout#what-is-a-valid-size
1676    /// [self-unaligned]: Unaligned
1677    /// [slice-dst]: KnownLayout#dynamically-sized-types
1678    ///
1679    /// # Compile-Time Assertions
1680    ///
1681    /// This method cannot yet be used on unsized types whose dynamically-sized
1682    /// component is zero-sized. Attempting to use this method on such types
1683    /// results in a compile-time assertion error; e.g.:
1684    ///
1685    /// ```compile_fail,E0080
1686    /// use zerocopy::*;
1687    /// # use zerocopy_derive::*;
1688    ///
1689    /// #[derive(TryFromBytes, Immutable, KnownLayout)]
1690    /// #[repr(C)]
1691    /// struct ZSTy {
1692    ///     leading_sized: u16,
1693    ///     trailing_dst: [()],
1694    /// }
1695    ///
1696    /// let _ = ZSTy::try_ref_from_suffix(0u16.as_bytes()); // ⚠ Compile Error!
1697    /// ```
1698    ///
1699    /// # Examples
1700    ///
1701    /// ```
1702    /// use zerocopy::TryFromBytes;
1703    /// # use zerocopy_derive::*;
1704    ///
1705    /// // The only valid value of this type is the byte `0xC0`
1706    /// #[derive(TryFromBytes, KnownLayout, Immutable)]
1707    /// #[repr(u8)]
1708    /// enum C0 { xC0 = 0xC0 }
1709    ///
1710    /// // The only valid value of this type is the bytes `0xC0C0`.
1711    /// #[derive(TryFromBytes, KnownLayout, Immutable)]
1712    /// #[repr(C)]
1713    /// struct C0C0(C0, C0);
1714    ///
1715    /// #[derive(TryFromBytes, KnownLayout, Immutable)]
1716    /// #[repr(C)]
1717    /// struct Packet {
1718    ///     magic_number: C0C0,
1719    ///     mug_size: u8,
1720    ///     temperature: u8,
1721    ///     marshmallows: [[u8; 2]],
1722    /// }
1723    ///
1724    /// // These are more bytes than are needed to encode a `Packet`.
1725    /// let bytes = &[0, 0xC0, 0xC0, 240, 77, 2, 3, 4, 5, 6, 7][..];
1726    ///
1727    /// let (prefix, packet) = Packet::try_ref_from_suffix(bytes).unwrap();
1728    ///
1729    /// assert_eq!(packet.mug_size, 240);
1730    /// assert_eq!(packet.temperature, 77);
1731    /// assert_eq!(packet.marshmallows, [[2, 3], [4, 5], [6, 7]]);
1732    /// assert_eq!(prefix, &[0u8][..]);
1733    ///
1734    /// // These bytes are not a valid instance of `Packet`.
1735    /// let bytes = &[0, 1, 2, 3, 4, 5, 6, 77, 240, 0xC0, 0x10][..];
1736    /// assert!(Packet::try_ref_from_suffix(bytes).is_err());
1737    /// ```
1738    #[must_use = "has no side effects"]
1739    #[inline]
1740    fn try_ref_from_suffix(source: &[u8]) -> Result<(&[u8], &Self), TryCastError<&[u8], Self>>
1741    where
1742        Self: KnownLayout + Immutable,
1743    {
1744        static_assert_dst_is_not_zst!(Self);
1745        try_ref_from_prefix_suffix(source, CastType::Suffix, None).map(swap)
1746    }
1747
1748    /// Attempts to interpret the given `source` as a `&mut Self` without
1749    /// copying.
1750    ///
1751    /// If the bytes of `source` are a valid instance of `Self`, this method
1752    /// returns a reference to those bytes interpreted as a `Self`. If the
1753    /// length of `source` is not a [valid size of `Self`][valid-size], or if
1754    /// `source` is not appropriately aligned, or if `source` is not a valid
1755    /// instance of `Self`, this returns `Err`. If [`Self:
1756    /// Unaligned`][self-unaligned], you can [infallibly discard the alignment
1757    /// error][ConvertError::from].
1758    ///
1759    /// `Self` may be a sized type, a slice, or a [slice DST][slice-dst].
1760    ///
1761    /// [valid-size]: crate::KnownLayout#what-is-a-valid-size
1762    /// [self-unaligned]: Unaligned
1763    /// [slice-dst]: KnownLayout#dynamically-sized-types
1764    ///
1765    /// # Compile-Time Assertions
1766    ///
1767    /// This method cannot yet be used on unsized types whose dynamically-sized
1768    /// component is zero-sized. Attempting to use this method on such types
1769    /// results in a compile-time assertion error; e.g.:
1770    ///
1771    /// ```compile_fail,E0080
1772    /// use zerocopy::*;
1773    /// # use zerocopy_derive::*;
1774    ///
1775    /// #[derive(TryFromBytes, IntoBytes, KnownLayout)]
1776    /// #[repr(C, packed)]
1777    /// struct ZSTy {
1778    ///     leading_sized: [u8; 2],
1779    ///     trailing_dst: [()],
1780    /// }
1781    ///
1782    /// let mut source = [85, 85];
1783    /// let _ = ZSTy::try_mut_from_bytes(&mut source[..]); // ⚠ Compile Error!
1784    /// ```
1785    ///
1786    /// # Examples
1787    ///
1788    /// ```
1789    /// use zerocopy::TryFromBytes;
1790    /// # use zerocopy_derive::*;
1791    ///
1792    /// // The only valid value of this type is the byte `0xC0`
1793    /// #[derive(TryFromBytes, IntoBytes, KnownLayout)]
1794    /// #[repr(u8)]
1795    /// enum C0 { xC0 = 0xC0 }
1796    ///
1797    /// // The only valid value of this type is the bytes `0xC0C0`.
1798    /// #[derive(TryFromBytes, IntoBytes, KnownLayout)]
1799    /// #[repr(C)]
1800    /// struct C0C0(C0, C0);
1801    ///
1802    /// #[derive(TryFromBytes, IntoBytes, KnownLayout)]
1803    /// #[repr(C, packed)]
1804    /// struct Packet {
1805    ///     magic_number: C0C0,
1806    ///     mug_size: u8,
1807    ///     temperature: u8,
1808    ///     marshmallows: [[u8; 2]],
1809    /// }
1810    ///
1811    /// let bytes = &mut [0xC0, 0xC0, 240, 77, 0, 1, 2, 3, 4, 5][..];
1812    ///
1813    /// let packet = Packet::try_mut_from_bytes(bytes).unwrap();
1814    ///
1815    /// assert_eq!(packet.mug_size, 240);
1816    /// assert_eq!(packet.temperature, 77);
1817    /// assert_eq!(packet.marshmallows, [[0, 1], [2, 3], [4, 5]]);
1818    ///
1819    /// packet.temperature = 111;
1820    ///
1821    /// assert_eq!(bytes, [0xC0, 0xC0, 240, 111, 0, 1, 2, 3, 4, 5]);
1822    ///
1823    /// // These bytes are not a valid instance of `Packet`.
1824    /// let bytes = &mut [0x10, 0xC0, 240, 77, 0, 1, 2, 3, 4, 5, 6][..];
1825    /// assert!(Packet::try_mut_from_bytes(bytes).is_err());
1826    /// ```
1827    #[must_use = "has no side effects"]
1828    #[inline]
1829    fn try_mut_from_bytes(source: &mut [u8]) -> Result<&mut Self, TryCastError<&mut [u8], Self>>
1830    where
1831        Self: KnownLayout + IntoBytes,
1832    {
1833        static_assert_dst_is_not_zst!(Self);
1834        match Ptr::from_mut(source).try_cast_into_no_leftover::<Self, BecauseExclusive>(None) {
1835            Ok(source) => {
1836                // This call may panic. If that happens, it doesn't cause any soundness
1837                // issues, as we have not generated any invalid state which we need to
1838                // fix before returning.
1839                //
1840                // Note that one panic or post-monomorphization error condition is
1841                // calling `try_into_valid` (and thus `is_bit_valid`) with a shared
1842                // pointer when `Self: !Immutable`. Since we call it here with an
1843                // exclusive pointer, this panic condition will not happen.
1844                match source.try_into_valid() {
1845                    Ok(source) => Ok(source.as_mut()),
1846                    Err(e) => {
1847                        Err(e.map_src(|src| src.as_bytes::<BecauseExclusive>().as_mut()).into())
1848                    }
1849                }
1850            }
1851            Err(e) => Err(e.map_src(Ptr::as_mut).into()),
1852        }
1853    }
1854
1855    /// Attempts to interpret the prefix of the given `source` as a `&mut
1856    /// Self`.
1857    ///
1858    /// This method computes the [largest possible size of `Self`][valid-size]
1859    /// that can fit in the leading bytes of `source`. If that prefix is a valid
1860    /// instance of `Self`, this method returns a reference to those bytes
1861    /// interpreted as `Self`, and a reference to the remaining bytes. If there
1862    /// are insufficient bytes, or if `source` is not appropriately aligned, or
1863    /// if the bytes are not a valid instance of `Self`, this returns `Err`. If
1864    /// [`Self: Unaligned`][self-unaligned], you can [infallibly discard the
1865    /// alignment error][ConvertError::from].
1866    ///
1867    /// `Self` may be a sized type, a slice, or a [slice DST][slice-dst].
1868    ///
1869    /// [valid-size]: crate::KnownLayout#what-is-a-valid-size
1870    /// [self-unaligned]: Unaligned
1871    /// [slice-dst]: KnownLayout#dynamically-sized-types
1872    ///
1873    /// # Compile-Time Assertions
1874    ///
1875    /// This method cannot yet be used on unsized types whose dynamically-sized
1876    /// component is zero-sized. Attempting to use this method on such types
1877    /// results in a compile-time assertion error; e.g.:
1878    ///
1879    /// ```compile_fail,E0080
1880    /// use zerocopy::*;
1881    /// # use zerocopy_derive::*;
1882    ///
1883    /// #[derive(TryFromBytes, IntoBytes, KnownLayout)]
1884    /// #[repr(C, packed)]
1885    /// struct ZSTy {
1886    ///     leading_sized: [u8; 2],
1887    ///     trailing_dst: [()],
1888    /// }
1889    ///
1890    /// let mut source = [85, 85];
1891    /// let _ = ZSTy::try_mut_from_prefix(&mut source[..]); // ⚠ Compile Error!
1892    /// ```
1893    ///
1894    /// # Examples
1895    ///
1896    /// ```
1897    /// use zerocopy::TryFromBytes;
1898    /// # use zerocopy_derive::*;
1899    ///
1900    /// // The only valid value of this type is the byte `0xC0`
1901    /// #[derive(TryFromBytes, IntoBytes, KnownLayout)]
1902    /// #[repr(u8)]
1903    /// enum C0 { xC0 = 0xC0 }
1904    ///
1905    /// // The only valid value of this type is the bytes `0xC0C0`.
1906    /// #[derive(TryFromBytes, IntoBytes, KnownLayout)]
1907    /// #[repr(C)]
1908    /// struct C0C0(C0, C0);
1909    ///
1910    /// #[derive(TryFromBytes, IntoBytes, KnownLayout)]
1911    /// #[repr(C, packed)]
1912    /// struct Packet {
1913    ///     magic_number: C0C0,
1914    ///     mug_size: u8,
1915    ///     temperature: u8,
1916    ///     marshmallows: [[u8; 2]],
1917    /// }
1918    ///
1919    /// // These are more bytes than are needed to encode a `Packet`.
1920    /// let bytes = &mut [0xC0, 0xC0, 240, 77, 0, 1, 2, 3, 4, 5, 6][..];
1921    ///
1922    /// let (packet, suffix) = Packet::try_mut_from_prefix(bytes).unwrap();
1923    ///
1924    /// assert_eq!(packet.mug_size, 240);
1925    /// assert_eq!(packet.temperature, 77);
1926    /// assert_eq!(packet.marshmallows, [[0, 1], [2, 3], [4, 5]]);
1927    /// assert_eq!(suffix, &[6u8][..]);
1928    ///
1929    /// packet.temperature = 111;
1930    /// suffix[0] = 222;
1931    ///
1932    /// assert_eq!(bytes, [0xC0, 0xC0, 240, 111, 0, 1, 2, 3, 4, 5, 222]);
1933    ///
1934    /// // These bytes are not a valid instance of `Packet`.
1935    /// let bytes = &mut [0x10, 0xC0, 240, 77, 0, 1, 2, 3, 4, 5, 6][..];
1936    /// assert!(Packet::try_mut_from_prefix(bytes).is_err());
1937    /// ```
1938    #[must_use = "has no side effects"]
1939    #[inline]
1940    fn try_mut_from_prefix(
1941        source: &mut [u8],
1942    ) -> Result<(&mut Self, &mut [u8]), TryCastError<&mut [u8], Self>>
1943    where
1944        Self: KnownLayout + IntoBytes,
1945    {
1946        static_assert_dst_is_not_zst!(Self);
1947        try_mut_from_prefix_suffix(source, CastType::Prefix, None)
1948    }
1949
1950    /// Attempts to interpret the suffix of the given `source` as a `&mut
1951    /// Self`.
1952    ///
1953    /// This method computes the [largest possible size of `Self`][valid-size]
1954    /// that can fit in the trailing bytes of `source`. If that suffix is a
1955    /// valid instance of `Self`, this method returns a reference to those bytes
1956    /// interpreted as `Self`, and a reference to the preceding bytes. If there
1957    /// are insufficient bytes, or if the suffix of `source` would not be
1958    /// appropriately aligned, or if the suffix is not a valid instance of
1959    /// `Self`, this returns `Err`. If [`Self: Unaligned`][self-unaligned], you
1960    /// can [infallibly discard the alignment error][ConvertError::from].
1961    ///
1962    /// `Self` may be a sized type, a slice, or a [slice DST][slice-dst].
1963    ///
1964    /// [valid-size]: crate::KnownLayout#what-is-a-valid-size
1965    /// [self-unaligned]: Unaligned
1966    /// [slice-dst]: KnownLayout#dynamically-sized-types
1967    ///
1968    /// # Compile-Time Assertions
1969    ///
1970    /// This method cannot yet be used on unsized types whose dynamically-sized
1971    /// component is zero-sized. Attempting to use this method on such types
1972    /// results in a compile-time assertion error; e.g.:
1973    ///
1974    /// ```compile_fail,E0080
1975    /// use zerocopy::*;
1976    /// # use zerocopy_derive::*;
1977    ///
1978    /// #[derive(TryFromBytes, IntoBytes, KnownLayout)]
1979    /// #[repr(C, packed)]
1980    /// struct ZSTy {
1981    ///     leading_sized: u16,
1982    ///     trailing_dst: [()],
1983    /// }
1984    ///
1985    /// let mut source = [85, 85];
1986    /// let _ = ZSTy::try_mut_from_suffix(&mut source[..]); // ⚠ Compile Error!
1987    /// ```
1988    ///
1989    /// # Examples
1990    ///
1991    /// ```
1992    /// use zerocopy::TryFromBytes;
1993    /// # use zerocopy_derive::*;
1994    ///
1995    /// // The only valid value of this type is the byte `0xC0`
1996    /// #[derive(TryFromBytes, IntoBytes, KnownLayout)]
1997    /// #[repr(u8)]
1998    /// enum C0 { xC0 = 0xC0 }
1999    ///
2000    /// // The only valid value of this type is the bytes `0xC0C0`.
2001    /// #[derive(TryFromBytes, IntoBytes, KnownLayout)]
2002    /// #[repr(C)]
2003    /// struct C0C0(C0, C0);
2004    ///
2005    /// #[derive(TryFromBytes, IntoBytes, KnownLayout)]
2006    /// #[repr(C, packed)]
2007    /// struct Packet {
2008    ///     magic_number: C0C0,
2009    ///     mug_size: u8,
2010    ///     temperature: u8,
2011    ///     marshmallows: [[u8; 2]],
2012    /// }
2013    ///
2014    /// // These are more bytes than are needed to encode a `Packet`.
2015    /// let bytes = &mut [0, 0xC0, 0xC0, 240, 77, 2, 3, 4, 5, 6, 7][..];
2016    ///
2017    /// let (prefix, packet) = Packet::try_mut_from_suffix(bytes).unwrap();
2018    ///
2019    /// assert_eq!(packet.mug_size, 240);
2020    /// assert_eq!(packet.temperature, 77);
2021    /// assert_eq!(packet.marshmallows, [[2, 3], [4, 5], [6, 7]]);
2022    /// assert_eq!(prefix, &[0u8][..]);
2023    ///
2024    /// prefix[0] = 111;
2025    /// packet.temperature = 222;
2026    ///
2027    /// assert_eq!(bytes, [111, 0xC0, 0xC0, 240, 222, 2, 3, 4, 5, 6, 7]);
2028    ///
2029    /// // These bytes are not a valid instance of `Packet`.
2030    /// let bytes = &mut [0, 1, 2, 3, 4, 5, 6, 77, 240, 0xC0, 0x10][..];
2031    /// assert!(Packet::try_mut_from_suffix(bytes).is_err());
2032    /// ```
2033    #[must_use = "has no side effects"]
2034    #[inline]
2035    fn try_mut_from_suffix(
2036        source: &mut [u8],
2037    ) -> Result<(&mut [u8], &mut Self), TryCastError<&mut [u8], Self>>
2038    where
2039        Self: KnownLayout + IntoBytes,
2040    {
2041        static_assert_dst_is_not_zst!(Self);
2042        try_mut_from_prefix_suffix(source, CastType::Suffix, None).map(swap)
2043    }
2044
2045    /// Attempts to interpret the given `source` as a `&Self` with a DST length
2046    /// equal to `count`.
2047    ///
2048    /// This method attempts to return a reference to `source` interpreted as a
2049    /// `Self` with `count` trailing elements. If the length of `source` is not
2050    /// equal to the size of `Self` with `count` elements, if `source` is not
2051    /// appropriately aligned, or if `source` does not contain a valid instance
2052    /// of `Self`, this returns `Err`. If [`Self: Unaligned`][self-unaligned],
2053    /// you can [infallibly discard the alignment error][ConvertError::from].
2054    ///
2055    /// [self-unaligned]: Unaligned
2056    /// [slice-dst]: KnownLayout#dynamically-sized-types
2057    ///
2058    /// # Examples
2059    ///
2060    /// ```
2061    /// # #![allow(non_camel_case_types)] // For C0::xC0
2062    /// use zerocopy::TryFromBytes;
2063    /// # use zerocopy_derive::*;
2064    ///
2065    /// // The only valid value of this type is the byte `0xC0`
2066    /// #[derive(TryFromBytes, KnownLayout, Immutable)]
2067    /// #[repr(u8)]
2068    /// enum C0 { xC0 = 0xC0 }
2069    ///
2070    /// // The only valid value of this type is the bytes `0xC0C0`.
2071    /// #[derive(TryFromBytes, KnownLayout, Immutable)]
2072    /// #[repr(C)]
2073    /// struct C0C0(C0, C0);
2074    ///
2075    /// #[derive(TryFromBytes, KnownLayout, Immutable)]
2076    /// #[repr(C)]
2077    /// struct Packet {
2078    ///     magic_number: C0C0,
2079    ///     mug_size: u8,
2080    ///     temperature: u8,
2081    ///     marshmallows: [[u8; 2]],
2082    /// }
2083    ///
2084    /// let bytes = &[0xC0, 0xC0, 240, 77, 2, 3, 4, 5, 6, 7][..];
2085    ///
2086    /// let packet = Packet::try_ref_from_bytes_with_elems(bytes, 3).unwrap();
2087    ///
2088    /// assert_eq!(packet.mug_size, 240);
2089    /// assert_eq!(packet.temperature, 77);
2090    /// assert_eq!(packet.marshmallows, [[2, 3], [4, 5], [6, 7]]);
2091    ///
2092    /// // These bytes are not a valid instance of `Packet`.
2093    /// let bytes = &[0, 1, 2, 3, 4, 5, 6, 77, 240, 0xC0, 0xC0][..];
2094    /// assert!(Packet::try_ref_from_bytes_with_elems(bytes, 3).is_err());
2095    /// ```
2096    ///
2097    /// Since an explicit `count` is provided, this method supports types with
2098    /// zero-sized trailing slice elements. Methods such as [`try_ref_from_bytes`]
2099    /// which do not take an explicit count do not support such types.
2100    ///
2101    /// ```
2102    /// use core::num::NonZeroU16;
2103    /// use zerocopy::*;
2104    /// # use zerocopy_derive::*;
2105    ///
2106    /// #[derive(TryFromBytes, Immutable, KnownLayout)]
2107    /// #[repr(C)]
2108    /// struct ZSTy {
2109    ///     leading_sized: NonZeroU16,
2110    ///     trailing_dst: [()],
2111    /// }
2112    ///
2113    /// let src = 0xCAFEu16.as_bytes();
2114    /// let zsty = ZSTy::try_ref_from_bytes_with_elems(src, 42).unwrap();
2115    /// assert_eq!(zsty.trailing_dst.len(), 42);
2116    /// ```
2117    ///
2118    /// [`try_ref_from_bytes`]: TryFromBytes::try_ref_from_bytes
2119    #[must_use = "has no side effects"]
2120    #[inline]
2121    fn try_ref_from_bytes_with_elems(
2122        source: &[u8],
2123        count: usize,
2124    ) -> Result<&Self, TryCastError<&[u8], Self>>
2125    where
2126        Self: KnownLayout<PointerMetadata = usize> + Immutable,
2127    {
2128        match Ptr::from_ref(source).try_cast_into_no_leftover::<Self, BecauseImmutable>(Some(count))
2129        {
2130            Ok(source) => {
2131                // This call may panic. If that happens, it doesn't cause any soundness
2132                // issues, as we have not generated any invalid state which we need to
2133                // fix before returning.
2134                //
2135                // Note that one panic or post-monomorphization error condition is
2136                // calling `try_into_valid` (and thus `is_bit_valid`) with a shared
2137                // pointer when `Self: !Immutable`. Since `Self: Immutable`, this panic
2138                // condition will not happen.
2139                match source.try_into_valid() {
2140                    Ok(source) => Ok(source.as_ref()),
2141                    Err(e) => {
2142                        Err(e.map_src(|src| src.as_bytes::<BecauseImmutable>().as_ref()).into())
2143                    }
2144                }
2145            }
2146            Err(e) => Err(e.map_src(Ptr::as_ref).into()),
2147        }
2148    }
2149
2150    /// Attempts to interpret the prefix of the given `source` as a `&Self` with
2151    /// a DST length equal to `count`.
2152    ///
2153    /// This method attempts to return a reference to the prefix of `source`
2154    /// interpreted as a `Self` with `count` trailing elements, and a reference
2155    /// to the remaining bytes. If the length of `source` is less than the size
2156    /// of `Self` with `count` elements, if `source` is not appropriately
2157    /// aligned, or if the prefix of `source` does not contain a valid instance
2158    /// of `Self`, this returns `Err`. If [`Self: Unaligned`][self-unaligned],
2159    /// you can [infallibly discard the alignment error][ConvertError::from].
2160    ///
2161    /// [self-unaligned]: Unaligned
2162    /// [slice-dst]: KnownLayout#dynamically-sized-types
2163    ///
2164    /// # Examples
2165    ///
2166    /// ```
2167    /// # #![allow(non_camel_case_types)] // For C0::xC0
2168    /// use zerocopy::TryFromBytes;
2169    /// # use zerocopy_derive::*;
2170    ///
2171    /// // The only valid value of this type is the byte `0xC0`
2172    /// #[derive(TryFromBytes, KnownLayout, Immutable)]
2173    /// #[repr(u8)]
2174    /// enum C0 { xC0 = 0xC0 }
2175    ///
2176    /// // The only valid value of this type is the bytes `0xC0C0`.
2177    /// #[derive(TryFromBytes, KnownLayout, Immutable)]
2178    /// #[repr(C)]
2179    /// struct C0C0(C0, C0);
2180    ///
2181    /// #[derive(TryFromBytes, KnownLayout, Immutable)]
2182    /// #[repr(C)]
2183    /// struct Packet {
2184    ///     magic_number: C0C0,
2185    ///     mug_size: u8,
2186    ///     temperature: u8,
2187    ///     marshmallows: [[u8; 2]],
2188    /// }
2189    ///
2190    /// let bytes = &[0xC0, 0xC0, 240, 77, 2, 3, 4, 5, 6, 7, 8][..];
2191    ///
2192    /// let (packet, suffix) = Packet::try_ref_from_prefix_with_elems(bytes, 3).unwrap();
2193    ///
2194    /// assert_eq!(packet.mug_size, 240);
2195    /// assert_eq!(packet.temperature, 77);
2196    /// assert_eq!(packet.marshmallows, [[2, 3], [4, 5], [6, 7]]);
2197    /// assert_eq!(suffix, &[8u8][..]);
2198    ///
2199    /// // These bytes are not a valid instance of `Packet`.
2200    /// let bytes = &mut [0, 1, 2, 3, 4, 5, 6, 7, 8, 77, 240, 0xC0, 0xC0][..];
2201    /// assert!(Packet::try_ref_from_prefix_with_elems(bytes, 3).is_err());
2202    /// ```
2203    ///
2204    /// Since an explicit `count` is provided, this method supports types with
2205    /// zero-sized trailing slice elements. Methods such as [`try_ref_from_prefix`]
2206    /// which do not take an explicit count do not support such types.
2207    ///
2208    /// ```
2209    /// use core::num::NonZeroU16;
2210    /// use zerocopy::*;
2211    /// # use zerocopy_derive::*;
2212    ///
2213    /// #[derive(TryFromBytes, Immutable, KnownLayout)]
2214    /// #[repr(C)]
2215    /// struct ZSTy {
2216    ///     leading_sized: NonZeroU16,
2217    ///     trailing_dst: [()],
2218    /// }
2219    ///
2220    /// let src = 0xCAFEu16.as_bytes();
2221    /// let (zsty, _) = ZSTy::try_ref_from_prefix_with_elems(src, 42).unwrap();
2222    /// assert_eq!(zsty.trailing_dst.len(), 42);
2223    /// ```
2224    ///
2225    /// [`try_ref_from_prefix`]: TryFromBytes::try_ref_from_prefix
2226    #[must_use = "has no side effects"]
2227    #[inline]
2228    fn try_ref_from_prefix_with_elems(
2229        source: &[u8],
2230        count: usize,
2231    ) -> Result<(&Self, &[u8]), TryCastError<&[u8], Self>>
2232    where
2233        Self: KnownLayout<PointerMetadata = usize> + Immutable,
2234    {
2235        try_ref_from_prefix_suffix(source, CastType::Prefix, Some(count))
2236    }
2237
2238    /// Attempts to interpret the suffix of the given `source` as a `&Self` with
2239    /// a DST length equal to `count`.
2240    ///
2241    /// This method attempts to return a reference to the suffix of `source`
2242    /// interpreted as a `Self` with `count` trailing elements, and a reference
2243    /// to the preceding bytes. If the length of `source` is less than the size
2244    /// of `Self` with `count` elements, if the suffix of `source` is not
2245    /// appropriately aligned, or if the suffix of `source` does not contain a
2246    /// valid instance of `Self`, this returns `Err`. If [`Self:
2247    /// Unaligned`][self-unaligned], you can [infallibly discard the alignment
2248    /// error][ConvertError::from].
2249    ///
2250    /// [self-unaligned]: Unaligned
2251    /// [slice-dst]: KnownLayout#dynamically-sized-types
2252    ///
2253    /// # Examples
2254    ///
2255    /// ```
2256    /// # #![allow(non_camel_case_types)] // For C0::xC0
2257    /// use zerocopy::TryFromBytes;
2258    /// # use zerocopy_derive::*;
2259    ///
2260    /// // The only valid value of this type is the byte `0xC0`
2261    /// #[derive(TryFromBytes, KnownLayout, Immutable)]
2262    /// #[repr(u8)]
2263    /// enum C0 { xC0 = 0xC0 }
2264    ///
2265    /// // The only valid value of this type is the bytes `0xC0C0`.
2266    /// #[derive(TryFromBytes, KnownLayout, Immutable)]
2267    /// #[repr(C)]
2268    /// struct C0C0(C0, C0);
2269    ///
2270    /// #[derive(TryFromBytes, KnownLayout, Immutable)]
2271    /// #[repr(C)]
2272    /// struct Packet {
2273    ///     magic_number: C0C0,
2274    ///     mug_size: u8,
2275    ///     temperature: u8,
2276    ///     marshmallows: [[u8; 2]],
2277    /// }
2278    ///
2279    /// let bytes = &[123, 0xC0, 0xC0, 240, 77, 2, 3, 4, 5, 6, 7][..];
2280    ///
2281    /// let (prefix, packet) = Packet::try_ref_from_suffix_with_elems(bytes, 3).unwrap();
2282    ///
2283    /// assert_eq!(packet.mug_size, 240);
2284    /// assert_eq!(packet.temperature, 77);
2285    /// assert_eq!(packet.marshmallows, [[2, 3], [4, 5], [6, 7]]);
2286    /// assert_eq!(prefix, &[123u8][..]);
2287    ///
2288    /// // These bytes are not a valid instance of `Packet`.
2289    /// let bytes = &[0, 1, 2, 3, 4, 5, 6, 7, 8, 77, 240, 0xC0, 0xC0][..];
2290    /// assert!(Packet::try_ref_from_suffix_with_elems(bytes, 3).is_err());
2291    /// ```
2292    ///
2293    /// Since an explicit `count` is provided, this method supports types with
2294    /// zero-sized trailing slice elements. Methods such as [`try_ref_from_suffix`]
2295    /// which do not take an explicit count do not support such types.
2296    ///
2297    /// ```
2298    /// use core::num::NonZeroU16;
2299    /// use zerocopy::*;
2300    /// # use zerocopy_derive::*;
2301    ///
2302    /// #[derive(TryFromBytes, Immutable, KnownLayout)]
2303    /// #[repr(C)]
2304    /// struct ZSTy {
2305    ///     leading_sized: NonZeroU16,
2306    ///     trailing_dst: [()],
2307    /// }
2308    ///
2309    /// let src = 0xCAFEu16.as_bytes();
2310    /// let (_, zsty) = ZSTy::try_ref_from_suffix_with_elems(src, 42).unwrap();
2311    /// assert_eq!(zsty.trailing_dst.len(), 42);
2312    /// ```
2313    ///
2314    /// [`try_ref_from_suffix`]: TryFromBytes::try_ref_from_suffix
2315    #[must_use = "has no side effects"]
2316    #[inline]
2317    fn try_ref_from_suffix_with_elems(
2318        source: &[u8],
2319        count: usize,
2320    ) -> Result<(&[u8], &Self), TryCastError<&[u8], Self>>
2321    where
2322        Self: KnownLayout<PointerMetadata = usize> + Immutable,
2323    {
2324        try_ref_from_prefix_suffix(source, CastType::Suffix, Some(count)).map(swap)
2325    }
2326
2327    /// Attempts to interpret the given `source` as a `&mut Self` with a DST
2328    /// length equal to `count`.
2329    ///
2330    /// This method attempts to return a reference to `source` interpreted as a
2331    /// `Self` with `count` trailing elements. If the length of `source` is not
2332    /// equal to the size of `Self` with `count` elements, if `source` is not
2333    /// appropriately aligned, or if `source` does not contain a valid instance
2334    /// of `Self`, this returns `Err`. If [`Self: Unaligned`][self-unaligned],
2335    /// you can [infallibly discard the alignment error][ConvertError::from].
2336    ///
2337    /// [self-unaligned]: Unaligned
2338    /// [slice-dst]: KnownLayout#dynamically-sized-types
2339    ///
2340    /// # Examples
2341    ///
2342    /// ```
2343    /// # #![allow(non_camel_case_types)] // For C0::xC0
2344    /// use zerocopy::TryFromBytes;
2345    /// # use zerocopy_derive::*;
2346    ///
2347    /// // The only valid value of this type is the byte `0xC0`
2348    /// #[derive(TryFromBytes, IntoBytes, KnownLayout)]
2349    /// #[repr(u8)]
2350    /// enum C0 { xC0 = 0xC0 }
2351    ///
2352    /// // The only valid value of this type is the bytes `0xC0C0`.
2353    /// #[derive(TryFromBytes, IntoBytes, KnownLayout)]
2354    /// #[repr(C)]
2355    /// struct C0C0(C0, C0);
2356    ///
2357    /// #[derive(TryFromBytes, IntoBytes, KnownLayout)]
2358    /// #[repr(C, packed)]
2359    /// struct Packet {
2360    ///     magic_number: C0C0,
2361    ///     mug_size: u8,
2362    ///     temperature: u8,
2363    ///     marshmallows: [[u8; 2]],
2364    /// }
2365    ///
2366    /// let bytes = &mut [0xC0, 0xC0, 240, 77, 2, 3, 4, 5, 6, 7][..];
2367    ///
2368    /// let packet = Packet::try_mut_from_bytes_with_elems(bytes, 3).unwrap();
2369    ///
2370    /// assert_eq!(packet.mug_size, 240);
2371    /// assert_eq!(packet.temperature, 77);
2372    /// assert_eq!(packet.marshmallows, [[2, 3], [4, 5], [6, 7]]);
2373    ///
2374    /// packet.temperature = 111;
2375    ///
2376    /// assert_eq!(bytes, [0xC0, 0xC0, 240, 111, 2, 3, 4, 5, 6, 7]);
2377    ///
2378    /// // These bytes are not a valid instance of `Packet`.
2379    /// let bytes = &mut [0, 1, 2, 3, 4, 5, 6, 77, 240, 0xC0, 0xC0][..];
2380    /// assert!(Packet::try_mut_from_bytes_with_elems(bytes, 3).is_err());
2381    /// ```
2382    ///
2383    /// Since an explicit `count` is provided, this method supports types with
2384    /// zero-sized trailing slice elements. Methods such as [`try_mut_from_bytes`]
2385    /// which do not take an explicit count do not support such types.
2386    ///
2387    /// ```
2388    /// use core::num::NonZeroU16;
2389    /// use zerocopy::*;
2390    /// # use zerocopy_derive::*;
2391    ///
2392    /// #[derive(TryFromBytes, IntoBytes, KnownLayout)]
2393    /// #[repr(C, packed)]
2394    /// struct ZSTy {
2395    ///     leading_sized: NonZeroU16,
2396    ///     trailing_dst: [()],
2397    /// }
2398    ///
2399    /// let mut src = 0xCAFEu16;
2400    /// let src = src.as_mut_bytes();
2401    /// let zsty = ZSTy::try_mut_from_bytes_with_elems(src, 42).unwrap();
2402    /// assert_eq!(zsty.trailing_dst.len(), 42);
2403    /// ```
2404    ///
2405    /// [`try_mut_from_bytes`]: TryFromBytes::try_mut_from_bytes
2406    #[must_use = "has no side effects"]
2407    #[inline]
2408    fn try_mut_from_bytes_with_elems(
2409        source: &mut [u8],
2410        count: usize,
2411    ) -> Result<&mut Self, TryCastError<&mut [u8], Self>>
2412    where
2413        Self: KnownLayout<PointerMetadata = usize> + IntoBytes,
2414    {
2415        match Ptr::from_mut(source).try_cast_into_no_leftover::<Self, BecauseExclusive>(Some(count))
2416        {
2417            Ok(source) => {
2418                // This call may panic. If that happens, it doesn't cause any soundness
2419                // issues, as we have not generated any invalid state which we need to
2420                // fix before returning.
2421                //
2422                // Note that one panic or post-monomorphization error condition is
2423                // calling `try_into_valid` (and thus `is_bit_valid`) with a shared
2424                // pointer when `Self: !Immutable`. Since we call it here with an
2425                // exclusive pointer, this panic condition will not happen.
2426                match source.try_into_valid() {
2427                    Ok(source) => Ok(source.as_mut()),
2428                    Err(e) => {
2429                        Err(e.map_src(|src| src.as_bytes::<BecauseExclusive>().as_mut()).into())
2430                    }
2431                }
2432            }
2433            Err(e) => Err(e.map_src(Ptr::as_mut).into()),
2434        }
2435    }
2436
2437    /// Attempts to interpret the prefix of the given `source` as a `&mut Self`
2438    /// with a DST length equal to `count`.
2439    ///
2440    /// This method attempts to return a reference to the prefix of `source`
2441    /// interpreted as a `Self` with `count` trailing elements, and a reference
2442    /// to the remaining bytes. If the length of `source` is less than the size
2443    /// of `Self` with `count` elements, if `source` is not appropriately
2444    /// aligned, or if the prefix of `source` does not contain a valid instance
2445    /// of `Self`, this returns `Err`. If [`Self: Unaligned`][self-unaligned],
2446    /// you can [infallibly discard the alignment error][ConvertError::from].
2447    ///
2448    /// [self-unaligned]: Unaligned
2449    /// [slice-dst]: KnownLayout#dynamically-sized-types
2450    ///
2451    /// # Examples
2452    ///
2453    /// ```
2454    /// # #![allow(non_camel_case_types)] // For C0::xC0
2455    /// use zerocopy::TryFromBytes;
2456    /// # use zerocopy_derive::*;
2457    ///
2458    /// // The only valid value of this type is the byte `0xC0`
2459    /// #[derive(TryFromBytes, IntoBytes, KnownLayout)]
2460    /// #[repr(u8)]
2461    /// enum C0 { xC0 = 0xC0 }
2462    ///
2463    /// // The only valid value of this type is the bytes `0xC0C0`.
2464    /// #[derive(TryFromBytes, IntoBytes, KnownLayout)]
2465    /// #[repr(C)]
2466    /// struct C0C0(C0, C0);
2467    ///
2468    /// #[derive(TryFromBytes, IntoBytes, KnownLayout)]
2469    /// #[repr(C, packed)]
2470    /// struct Packet {
2471    ///     magic_number: C0C0,
2472    ///     mug_size: u8,
2473    ///     temperature: u8,
2474    ///     marshmallows: [[u8; 2]],
2475    /// }
2476    ///
2477    /// let bytes = &mut [0xC0, 0xC0, 240, 77, 2, 3, 4, 5, 6, 7, 8][..];
2478    ///
2479    /// let (packet, suffix) = Packet::try_mut_from_prefix_with_elems(bytes, 3).unwrap();
2480    ///
2481    /// assert_eq!(packet.mug_size, 240);
2482    /// assert_eq!(packet.temperature, 77);
2483    /// assert_eq!(packet.marshmallows, [[2, 3], [4, 5], [6, 7]]);
2484    /// assert_eq!(suffix, &[8u8][..]);
2485    ///
2486    /// packet.temperature = 111;
2487    /// suffix[0] = 222;
2488    ///
2489    /// assert_eq!(bytes, [0xC0, 0xC0, 240, 111, 2, 3, 4, 5, 6, 7, 222]);
2490    ///
2491    /// // These bytes are not a valid instance of `Packet`.
2492    /// let bytes = &mut [0, 1, 2, 3, 4, 5, 6, 7, 8, 77, 240, 0xC0, 0xC0][..];
2493    /// assert!(Packet::try_mut_from_prefix_with_elems(bytes, 3).is_err());
2494    /// ```
2495    ///
2496    /// Since an explicit `count` is provided, this method supports types with
2497    /// zero-sized trailing slice elements. Methods such as [`try_mut_from_prefix`]
2498    /// which do not take an explicit count do not support such types.
2499    ///
2500    /// ```
2501    /// use core::num::NonZeroU16;
2502    /// use zerocopy::*;
2503    /// # use zerocopy_derive::*;
2504    ///
2505    /// #[derive(TryFromBytes, IntoBytes, KnownLayout)]
2506    /// #[repr(C, packed)]
2507    /// struct ZSTy {
2508    ///     leading_sized: NonZeroU16,
2509    ///     trailing_dst: [()],
2510    /// }
2511    ///
2512    /// let mut src = 0xCAFEu16;
2513    /// let src = src.as_mut_bytes();
2514    /// let (zsty, _) = ZSTy::try_mut_from_prefix_with_elems(src, 42).unwrap();
2515    /// assert_eq!(zsty.trailing_dst.len(), 42);
2516    /// ```
2517    ///
2518    /// [`try_mut_from_prefix`]: TryFromBytes::try_mut_from_prefix
2519    #[must_use = "has no side effects"]
2520    #[inline]
2521    fn try_mut_from_prefix_with_elems(
2522        source: &mut [u8],
2523        count: usize,
2524    ) -> Result<(&mut Self, &mut [u8]), TryCastError<&mut [u8], Self>>
2525    where
2526        Self: KnownLayout<PointerMetadata = usize> + IntoBytes,
2527    {
2528        try_mut_from_prefix_suffix(source, CastType::Prefix, Some(count))
2529    }
2530
2531    /// Attempts to interpret the suffix of the given `source` as a `&mut Self`
2532    /// with a DST length equal to `count`.
2533    ///
2534    /// This method attempts to return a reference to the suffix of `source`
2535    /// interpreted as a `Self` with `count` trailing elements, and a reference
2536    /// to the preceding bytes. If the length of `source` is less than the size
2537    /// of `Self` with `count` elements, if the suffix of `source` is not
2538    /// appropriately aligned, or if the suffix of `source` does not contain a
2539    /// valid instance of `Self`, this returns `Err`. If [`Self:
2540    /// Unaligned`][self-unaligned], you can [infallibly discard the alignment
2541    /// error][ConvertError::from].
2542    ///
2543    /// [self-unaligned]: Unaligned
2544    /// [slice-dst]: KnownLayout#dynamically-sized-types
2545    ///
2546    /// # Examples
2547    ///
2548    /// ```
2549    /// # #![allow(non_camel_case_types)] // For C0::xC0
2550    /// use zerocopy::TryFromBytes;
2551    /// # use zerocopy_derive::*;
2552    ///
2553    /// // The only valid value of this type is the byte `0xC0`
2554    /// #[derive(TryFromBytes, IntoBytes, KnownLayout)]
2555    /// #[repr(u8)]
2556    /// enum C0 { xC0 = 0xC0 }
2557    ///
2558    /// // The only valid value of this type is the bytes `0xC0C0`.
2559    /// #[derive(TryFromBytes, IntoBytes, KnownLayout)]
2560    /// #[repr(C)]
2561    /// struct C0C0(C0, C0);
2562    ///
2563    /// #[derive(TryFromBytes, IntoBytes, KnownLayout)]
2564    /// #[repr(C, packed)]
2565    /// struct Packet {
2566    ///     magic_number: C0C0,
2567    ///     mug_size: u8,
2568    ///     temperature: u8,
2569    ///     marshmallows: [[u8; 2]],
2570    /// }
2571    ///
2572    /// let bytes = &mut [123, 0xC0, 0xC0, 240, 77, 2, 3, 4, 5, 6, 7][..];
2573    ///
2574    /// let (prefix, packet) = Packet::try_mut_from_suffix_with_elems(bytes, 3).unwrap();
2575    ///
2576    /// assert_eq!(packet.mug_size, 240);
2577    /// assert_eq!(packet.temperature, 77);
2578    /// assert_eq!(packet.marshmallows, [[2, 3], [4, 5], [6, 7]]);
2579    /// assert_eq!(prefix, &[123u8][..]);
2580    ///
2581    /// prefix[0] = 111;
2582    /// packet.temperature = 222;
2583    ///
2584    /// assert_eq!(bytes, [111, 0xC0, 0xC0, 240, 222, 2, 3, 4, 5, 6, 7]);
2585    ///
2586    /// // These bytes are not a valid instance of `Packet`.
2587    /// let bytes = &mut [0, 1, 2, 3, 4, 5, 6, 7, 8, 77, 240, 0xC0, 0xC0][..];
2588    /// assert!(Packet::try_mut_from_suffix_with_elems(bytes, 3).is_err());
2589    /// ```
2590    ///
2591    /// Since an explicit `count` is provided, this method supports types with
2592    /// zero-sized trailing slice elements. Methods such as [`try_mut_from_suffix`]
2593    /// which do not take an explicit count do not support such types.
2594    ///
2595    /// ```
2596    /// use core::num::NonZeroU16;
2597    /// use zerocopy::*;
2598    /// # use zerocopy_derive::*;
2599    ///
2600    /// #[derive(TryFromBytes, IntoBytes, KnownLayout)]
2601    /// #[repr(C, packed)]
2602    /// struct ZSTy {
2603    ///     leading_sized: NonZeroU16,
2604    ///     trailing_dst: [()],
2605    /// }
2606    ///
2607    /// let mut src = 0xCAFEu16;
2608    /// let src = src.as_mut_bytes();
2609    /// let (_, zsty) = ZSTy::try_mut_from_suffix_with_elems(src, 42).unwrap();
2610    /// assert_eq!(zsty.trailing_dst.len(), 42);
2611    /// ```
2612    ///
2613    /// [`try_mut_from_suffix`]: TryFromBytes::try_mut_from_suffix
2614    #[must_use = "has no side effects"]
2615    #[inline]
2616    fn try_mut_from_suffix_with_elems(
2617        source: &mut [u8],
2618        count: usize,
2619    ) -> Result<(&mut [u8], &mut Self), TryCastError<&mut [u8], Self>>
2620    where
2621        Self: KnownLayout<PointerMetadata = usize> + IntoBytes,
2622    {
2623        try_mut_from_prefix_suffix(source, CastType::Suffix, Some(count)).map(swap)
2624    }
2625
2626    /// Attempts to read the given `source` as a `Self`.
2627    ///
2628    /// If `source.len() != size_of::<Self>()` or the bytes are not a valid
2629    /// instance of `Self`, this returns `Err`.
2630    ///
2631    /// # Examples
2632    ///
2633    /// ```
2634    /// use zerocopy::TryFromBytes;
2635    /// # use zerocopy_derive::*;
2636    ///
2637    /// // The only valid value of this type is the byte `0xC0`
2638    /// #[derive(TryFromBytes)]
2639    /// #[repr(u8)]
2640    /// enum C0 { xC0 = 0xC0 }
2641    ///
2642    /// // The only valid value of this type is the bytes `0xC0C0`.
2643    /// #[derive(TryFromBytes)]
2644    /// #[repr(C)]
2645    /// struct C0C0(C0, C0);
2646    ///
2647    /// #[derive(TryFromBytes)]
2648    /// #[repr(C)]
2649    /// struct Packet {
2650    ///     magic_number: C0C0,
2651    ///     mug_size: u8,
2652    ///     temperature: u8,
2653    /// }
2654    ///
2655    /// let bytes = &[0xC0, 0xC0, 240, 77][..];
2656    ///
2657    /// let packet = Packet::try_read_from_bytes(bytes).unwrap();
2658    ///
2659    /// assert_eq!(packet.mug_size, 240);
2660    /// assert_eq!(packet.temperature, 77);
2661    ///
2662    /// // These bytes are not a valid instance of `Packet`.
2663    /// let bytes = &mut [0x10, 0xC0, 240, 77][..];
2664    /// assert!(Packet::try_read_from_bytes(bytes).is_err());
2665    /// ```
2666    #[must_use = "has no side effects"]
2667    #[inline]
2668    fn try_read_from_bytes(source: &[u8]) -> Result<Self, TryReadError<&[u8], Self>>
2669    where
2670        Self: Sized,
2671    {
2672        let candidate = match CoreMaybeUninit::<Self>::read_from_bytes(source) {
2673            Ok(candidate) => candidate,
2674            Err(e) => {
2675                return Err(TryReadError::Size(e.with_dst()));
2676            }
2677        };
2678        // SAFETY: `candidate` was copied from `source: &[u8]`, so all of
2679        // its bytes are initialized.
2680        unsafe { try_read_from(source, candidate) }
2681    }
2682
2683    /// Attempts to read a `Self` from the prefix of the given `source`.
2684    ///
2685    /// This attempts to read a `Self` from the first `size_of::<Self>()` bytes
2686    /// of `source`, returning that `Self` and any remaining bytes. If
2687    /// `source.len() < size_of::<Self>()` or the bytes are not a valid instance
2688    /// of `Self`, it returns `Err`.
2689    ///
2690    /// # Examples
2691    ///
2692    /// ```
2693    /// use zerocopy::TryFromBytes;
2694    /// # use zerocopy_derive::*;
2695    ///
2696    /// // The only valid value of this type is the byte `0xC0`
2697    /// #[derive(TryFromBytes)]
2698    /// #[repr(u8)]
2699    /// enum C0 { xC0 = 0xC0 }
2700    ///
2701    /// // The only valid value of this type is the bytes `0xC0C0`.
2702    /// #[derive(TryFromBytes)]
2703    /// #[repr(C)]
2704    /// struct C0C0(C0, C0);
2705    ///
2706    /// #[derive(TryFromBytes)]
2707    /// #[repr(C)]
2708    /// struct Packet {
2709    ///     magic_number: C0C0,
2710    ///     mug_size: u8,
2711    ///     temperature: u8,
2712    /// }
2713    ///
2714    /// // These are more bytes than are needed to encode a `Packet`.
2715    /// let bytes = &[0xC0, 0xC0, 240, 77, 0, 1, 2, 3, 4, 5, 6][..];
2716    ///
2717    /// let (packet, suffix) = Packet::try_read_from_prefix(bytes).unwrap();
2718    ///
2719    /// assert_eq!(packet.mug_size, 240);
2720    /// assert_eq!(packet.temperature, 77);
2721    /// assert_eq!(suffix, &[0u8, 1, 2, 3, 4, 5, 6][..]);
2722    ///
2723    /// // These bytes are not a valid instance of `Packet`.
2724    /// let bytes = &[0x10, 0xC0, 240, 77, 0, 1, 2, 3, 4, 5, 6][..];
2725    /// assert!(Packet::try_read_from_prefix(bytes).is_err());
2726    /// ```
2727    #[must_use = "has no side effects"]
2728    #[inline]
2729    fn try_read_from_prefix(source: &[u8]) -> Result<(Self, &[u8]), TryReadError<&[u8], Self>>
2730    where
2731        Self: Sized,
2732    {
2733        let (candidate, suffix) = match CoreMaybeUninit::<Self>::read_from_prefix(source) {
2734            Ok(candidate) => candidate,
2735            Err(e) => {
2736                return Err(TryReadError::Size(e.with_dst()));
2737            }
2738        };
2739        // SAFETY: `candidate` was copied from `source: &[u8]`, so all of
2740        // its bytes are initialized.
2741        unsafe { try_read_from(source, candidate).map(|slf| (slf, suffix)) }
2742    }
2743
2744    /// Attempts to read a `Self` from the suffix of the given `source`.
2745    ///
2746    /// This attempts to read a `Self` from the last `size_of::<Self>()` bytes
2747    /// of `source`, returning that `Self` and any preceding bytes. If
2748    /// `source.len() < size_of::<Self>()` or the bytes are not a valid instance
2749    /// of `Self`, it returns `Err`.
2750    ///
2751    /// # Examples
2752    ///
2753    /// ```
2754    /// # #![allow(non_camel_case_types)] // For C0::xC0
2755    /// use zerocopy::TryFromBytes;
2756    /// # use zerocopy_derive::*;
2757    ///
2758    /// // The only valid value of this type is the byte `0xC0`
2759    /// #[derive(TryFromBytes)]
2760    /// #[repr(u8)]
2761    /// enum C0 { xC0 = 0xC0 }
2762    ///
2763    /// // The only valid value of this type is the bytes `0xC0C0`.
2764    /// #[derive(TryFromBytes)]
2765    /// #[repr(C)]
2766    /// struct C0C0(C0, C0);
2767    ///
2768    /// #[derive(TryFromBytes)]
2769    /// #[repr(C)]
2770    /// struct Packet {
2771    ///     magic_number: C0C0,
2772    ///     mug_size: u8,
2773    ///     temperature: u8,
2774    /// }
2775    ///
2776    /// // These are more bytes than are needed to encode a `Packet`.
2777    /// let bytes = &[0, 1, 2, 3, 4, 5, 0xC0, 0xC0, 240, 77][..];
2778    ///
2779    /// let (prefix, packet) = Packet::try_read_from_suffix(bytes).unwrap();
2780    ///
2781    /// assert_eq!(packet.mug_size, 240);
2782    /// assert_eq!(packet.temperature, 77);
2783    /// assert_eq!(prefix, &[0u8, 1, 2, 3, 4, 5][..]);
2784    ///
2785    /// // These bytes are not a valid instance of `Packet`.
2786    /// let bytes = &[0, 1, 2, 3, 4, 5, 0x10, 0xC0, 240, 77][..];
2787    /// assert!(Packet::try_read_from_suffix(bytes).is_err());
2788    /// ```
2789    #[must_use = "has no side effects"]
2790    #[inline]
2791    fn try_read_from_suffix(source: &[u8]) -> Result<(&[u8], Self), TryReadError<&[u8], Self>>
2792    where
2793        Self: Sized,
2794    {
2795        let (prefix, candidate) = match CoreMaybeUninit::<Self>::read_from_suffix(source) {
2796            Ok(candidate) => candidate,
2797            Err(e) => {
2798                return Err(TryReadError::Size(e.with_dst()));
2799            }
2800        };
2801        // SAFETY: `candidate` was copied from `source: &[u8]`, so all of
2802        // its bytes are initialized.
2803        unsafe { try_read_from(source, candidate).map(|slf| (prefix, slf)) }
2804    }
2805}
2806
2807#[inline(always)]
2808fn try_ref_from_prefix_suffix<T: TryFromBytes + KnownLayout + Immutable + ?Sized>(
2809    source: &[u8],
2810    cast_type: CastType,
2811    meta: Option<T::PointerMetadata>,
2812) -> Result<(&T, &[u8]), TryCastError<&[u8], T>> {
2813    match Ptr::from_ref(source).try_cast_into::<T, BecauseImmutable>(cast_type, meta) {
2814        Ok((source, prefix_suffix)) => {
2815            // This call may panic. If that happens, it doesn't cause any soundness
2816            // issues, as we have not generated any invalid state which we need to
2817            // fix before returning.
2818            //
2819            // Note that one panic or post-monomorphization error condition is
2820            // calling `try_into_valid` (and thus `is_bit_valid`) with a shared
2821            // pointer when `T: !Immutable`. Since this function requires
2822            // `T: Immutable`, this panic condition will not happen.
2823            match source.try_into_valid() {
2824                Ok(valid) => Ok((valid.as_ref(), prefix_suffix.as_ref())),
2825                Err(e) => Err(e.map_src(|src| src.as_bytes::<BecauseImmutable>().as_ref()).into()),
2826            }
2827        }
2828        Err(e) => Err(e.map_src(Ptr::as_ref).into()),
2829    }
2830}
2831
2832#[inline(always)]
2833fn try_mut_from_prefix_suffix<T: IntoBytes + TryFromBytes + KnownLayout + ?Sized>(
2834    candidate: &mut [u8],
2835    cast_type: CastType,
2836    meta: Option<T::PointerMetadata>,
2837) -> Result<(&mut T, &mut [u8]), TryCastError<&mut [u8], T>> {
2838    match Ptr::from_mut(candidate).try_cast_into::<T, BecauseExclusive>(cast_type, meta) {
2839        Ok((candidate, prefix_suffix)) => {
2840            // This call may panic. If that happens, it doesn't cause any soundness
2841            // issues, as we have not generated any invalid state which we need to
2842            // fix before returning.
2843            //
2844            // Note that one panic or post-monomorphization error condition is
2845            // calling `try_into_valid` (and thus `is_bit_valid`) with a shared
2846            // pointer when `T: !Immutable`. Since `candidate` is an exclusive
2847            // (rather than shared) pointer, this panic condition will not happen.
2848            match candidate.try_into_valid() {
2849                Ok(valid) => Ok((valid.as_mut(), prefix_suffix.as_mut())),
2850                Err(e) => Err(e.map_src(|src| src.as_bytes::<BecauseExclusive>().as_mut()).into()),
2851            }
2852        }
2853        Err(e) => Err(e.map_src(Ptr::as_mut).into()),
2854    }
2855}
2856
2857#[inline(always)]
2858fn swap<T, U>((t, u): (T, U)) -> (U, T) {
2859    (u, t)
2860}
2861
2862/// # Safety
2863///
2864/// All bytes of `candidate` must be initialized.
2865#[inline(always)]
2866unsafe fn try_read_from<S, T: TryFromBytes>(
2867    source: S,
2868    mut candidate: CoreMaybeUninit<T>,
2869) -> Result<T, TryReadError<S, T>> {
2870    // We use `from_mut` despite not mutating via `c_ptr` so that we don't need
2871    // to add a `T: Immutable` bound.
2872    let c_ptr = Ptr::from_mut(&mut candidate);
2873    // SAFETY: `c_ptr` has no uninitialized sub-ranges because it derived from
2874    // `candidate`, which the caller promises is entirely initialized. Since
2875    // `candidate` is a `MaybeUninit`, it has no validity requirements, and so
2876    // no values written to an `Initialized` `c_ptr` can violate its validity.
2877    // Since `c_ptr` has `Exclusive` aliasing, no mutations may happen except
2878    // via `c_ptr` so long as it is live, so we don't need to worry about the
2879    // fact that `c_ptr` may have more restricted validity than `candidate`.
2880    let c_ptr = unsafe { c_ptr.assume_validity::<invariant::Initialized>() };
2881    let c_ptr = c_ptr.transmute();
2882
2883    // Since we don't have `T: KnownLayout`, we hack around that by using
2884    // `Wrapping<T>`, which implements `KnownLayout` even if `T` doesn't.
2885    //
2886    // This call may panic. If that happens, it doesn't cause any soundness
2887    // issues, as we have not generated any invalid state which we need to fix
2888    // before returning.
2889    //
2890    // Note that one panic or post-monomorphization error condition is calling
2891    // `try_into_valid` (and thus `is_bit_valid`) with a shared pointer when
2892    // `T: !Immutable`. Since `c_ptr` has `Exclusive` aliasing rather than being
2893    // a shared pointer, this panic condition will not happen.
2894    if !Wrapping::<T>::is_bit_valid(c_ptr.forget_aligned()) {
2895        return Err(ValidityError::new(source).into());
2896    }
2897
2898    fn _assert_same_size_and_validity<T>()
2899    where
2900        Wrapping<T>: pointer::TransmuteFrom<T, invariant::Valid, invariant::Valid>,
2901        T: pointer::TransmuteFrom<Wrapping<T>, invariant::Valid, invariant::Valid>,
2902    {
2903    }
2904
2905    _assert_same_size_and_validity::<T>();
2906
2907    // SAFETY: We just validated that `candidate` contains a valid
2908    // `Wrapping<T>`, which has the same size and bit validity as `T`, as
2909    // guaranteed by the preceding type assertion.
2910    Ok(unsafe { candidate.assume_init() })
2911}
2912
2913/// Types for which a sequence of `0` bytes is a valid instance.
2914///
2915/// Any memory region of the appropriate length which is guaranteed to contain
2916/// only zero bytes can be viewed as any `FromZeros` type with no runtime
2917/// overhead. This is useful whenever memory is known to be in a zeroed state,
2918/// such memory returned from some allocation routines.
2919///
2920/// # Warning: Padding bytes
2921///
2922/// Note that, when a value is moved or copied, only the non-padding bytes of
2923/// that value are guaranteed to be preserved. It is unsound to assume that
2924/// values written to padding bytes are preserved after a move or copy. For more
2925/// details, see the [`FromBytes` docs][frombytes-warning-padding-bytes].
2926///
2927/// [frombytes-warning-padding-bytes]: FromBytes#warning-padding-bytes
2928///
2929/// # Implementation
2930///
2931/// **Do not implement this trait yourself!** Instead, use
2932/// [`#[derive(FromZeros)]`][derive]; e.g.:
2933///
2934/// ```
2935/// # use zerocopy_derive::{FromZeros, Immutable};
2936/// #[derive(FromZeros)]
2937/// struct MyStruct {
2938/// # /*
2939///     ...
2940/// # */
2941/// }
2942///
2943/// #[derive(FromZeros)]
2944/// #[repr(u8)]
2945/// enum MyEnum {
2946/// #   Variant0,
2947/// # /*
2948///     ...
2949/// # */
2950/// }
2951///
2952/// #[derive(FromZeros, Immutable)]
2953/// union MyUnion {
2954/// #   variant: u8,
2955/// # /*
2956///     ...
2957/// # */
2958/// }
2959/// ```
2960///
2961/// This derive performs a sophisticated, compile-time safety analysis to
2962/// determine whether a type is `FromZeros`.
2963///
2964/// # Safety
2965///
2966/// *This section describes what is required in order for `T: FromZeros`, and
2967/// what unsafe code may assume of such types. If you don't plan on implementing
2968/// `FromZeros` manually, and you don't plan on writing unsafe code that
2969/// operates on `FromZeros` types, then you don't need to read this section.*
2970///
2971/// If `T: FromZeros`, then unsafe code may assume that it is sound to produce a
2972/// `T` whose bytes are all initialized to zero. If a type is marked as
2973/// `FromZeros` which violates this contract, it may cause undefined behavior.
2974///
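/// For example, unsafe code may rely on this guarantee along the following
/// lines (a minimal sketch of roughly what [`FromZeros::new_zeroed`] does; the
/// helper `make_zeroed` is purely illustrative and not part of zerocopy's
/// API):
///
/// ```
/// use core::mem::MaybeUninit;
/// use zerocopy::FromZeros;
///
/// fn make_zeroed<T: FromZeros>() -> T {
///     // SAFETY: `T: FromZeros` guarantees that the all-zeros bit pattern is
///     // a valid instance of `T`.
///     unsafe { MaybeUninit::<T>::zeroed().assume_init() }
/// }
///
/// assert_eq!(make_zeroed::<u32>(), 0);
/// ```
///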
2975/// `#[derive(FromZeros)]` only permits [types which satisfy these
2976/// requirements][derive-analysis].
2977///
2978#[cfg_attr(
2979    feature = "derive",
2980    doc = "[derive]: zerocopy_derive::FromZeros",
2981    doc = "[derive-analysis]: zerocopy_derive::FromZeros#analysis"
2982)]
2983#[cfg_attr(
2984    not(feature = "derive"),
2985    doc = concat!("[derive]: https://docs.rs/zerocopy/", env!("CARGO_PKG_VERSION"), "/zerocopy/derive.FromZeros.html"),
2986    doc = concat!("[derive-analysis]: https://docs.rs/zerocopy/", env!("CARGO_PKG_VERSION"), "/zerocopy/derive.FromZeros.html#analysis"),
2987)]
2988#[cfg_attr(
2989    zerocopy_diagnostic_on_unimplemented_1_78_0,
2990    diagnostic::on_unimplemented(note = "Consider adding `#[derive(FromZeros)]` to `{Self}`")
2991)]
2992pub unsafe trait FromZeros: TryFromBytes {
2993    // The `Self: Sized` bound makes it so that `FromZeros` is still object
2994    // safe.
2995    #[doc(hidden)]
2996    fn only_derive_is_allowed_to_implement_this_trait()
2997    where
2998        Self: Sized;
2999
3000    /// Overwrites `self` with zeros.
3001    ///
3002    /// Sets every byte in `self` to 0. While this is similar to doing `*self =
3003    /// Self::new_zeroed()`, it differs in that `zero` does not semantically
3004    /// drop the current value and replace it with a new one — it simply
3005    /// modifies the bytes of the existing value.
3006    ///
3007    /// # Examples
3008    ///
3009    /// ```
3010    /// # use zerocopy::FromZeros;
3011    /// # use zerocopy_derive::*;
3012    /// #
3013    /// #[derive(FromZeros)]
3014    /// #[repr(C)]
3015    /// struct PacketHeader {
3016    ///     src_port: [u8; 2],
3017    ///     dst_port: [u8; 2],
3018    ///     length: [u8; 2],
3019    ///     checksum: [u8; 2],
3020    /// }
3021    ///
3022    /// let mut header = PacketHeader {
3023    ///     src_port: 100u16.to_be_bytes(),
3024    ///     dst_port: 200u16.to_be_bytes(),
3025    ///     length: 300u16.to_be_bytes(),
3026    ///     checksum: 400u16.to_be_bytes(),
3027    /// };
3028    ///
3029    /// header.zero();
3030    ///
3031    /// assert_eq!(header.src_port, [0, 0]);
3032    /// assert_eq!(header.dst_port, [0, 0]);
3033    /// assert_eq!(header.length, [0, 0]);
3034    /// assert_eq!(header.checksum, [0, 0]);
3035    /// ```
3036    #[inline(always)]
3037    fn zero(&mut self) {
3038        let slf: *mut Self = self;
3039        let len = mem::size_of_val(self);
3040        // SAFETY:
3041        // - `self` is guaranteed by the type system to be valid for writes of
3042        //   size `size_of_val(self)`.
3043        // - `u8`'s alignment is 1, and thus `self` is guaranteed to be aligned
3044        //   as required by `u8`.
3045        // - Since `Self: FromZeros`, the all-zeros instance is a valid instance
3046        //   of `Self`.
3047        //
3048        // TODO(#429): Add references to docs and quotes.
3049        unsafe { ptr::write_bytes(slf.cast::<u8>(), 0, len) };
3050    }
3051
3052    /// Creates an instance of `Self` from zeroed bytes.
3053    ///
3054    /// # Examples
3055    ///
3056    /// ```
3057    /// # use zerocopy::FromZeros;
3058    /// # use zerocopy_derive::*;
3059    /// #
3060    /// #[derive(FromZeros)]
3061    /// #[repr(C)]
3062    /// struct PacketHeader {
3063    ///     src_port: [u8; 2],
3064    ///     dst_port: [u8; 2],
3065    ///     length: [u8; 2],
3066    ///     checksum: [u8; 2],
3067    /// }
3068    ///
3069    /// let header: PacketHeader = FromZeros::new_zeroed();
3070    ///
3071    /// assert_eq!(header.src_port, [0, 0]);
3072    /// assert_eq!(header.dst_port, [0, 0]);
3073    /// assert_eq!(header.length, [0, 0]);
3074    /// assert_eq!(header.checksum, [0, 0]);
3075    /// ```
3076    #[must_use = "has no side effects"]
3077    #[inline(always)]
3078    fn new_zeroed() -> Self
3079    where
3080        Self: Sized,
3081    {
3082        // SAFETY: `FromZeros` says that the all-zeros bit pattern is legal.
3083        unsafe { mem::zeroed() }
3084    }
3085
3086    /// Creates a `Box<Self>` from zeroed bytes.
3087    ///
3088    /// This function is useful for allocating large values on the heap and
3089    /// zero-initializing them, without ever creating a temporary instance of
3090    /// `Self` on the stack. For example, `<[u8; 1048576]>::new_box_zeroed()`
3091    /// will allocate `[u8; 1048576]` directly on the heap; it does not require
3092    /// storing `[u8; 1048576]` in a temporary variable on the stack.
3093    ///
3094    /// On systems that use a heap implementation that supports allocating from
3095    /// pre-zeroed memory, using `new_box_zeroed` (or related functions) may
3096    /// have performance benefits.
3097    ///
3098    /// # Errors
3099    ///
3100    /// Returns an error on allocation failure. Allocation failure is guaranteed
3101    /// never to cause a panic or an abort.
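    ///
    /// # Examples
    ///
    /// A minimal sketch (assuming the `alloc` feature is enabled) of
    /// zero-initializing a large array directly on the heap:
    ///
    /// ```
    /// # use zerocopy::FromZeros;
    /// let buf = <[u8; 1048576]>::new_box_zeroed().unwrap();
    /// assert!(buf.iter().all(|&b| b == 0));
    /// ```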
3102    #[must_use = "has no side effects (other than allocation)"]
3103    #[cfg(any(feature = "alloc", test))]
3104    #[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))]
3105    #[inline]
3106    fn new_box_zeroed() -> Result<Box<Self>, AllocError>
3107    where
3108        Self: Sized,
3109    {
3110        // If `Self` is a ZST, then return a proper boxed instance of it. There is
3111        // no allocation, but `Box` does require a correct dangling pointer.
3112        let layout = Layout::new::<Self>();
3113        if layout.size() == 0 {
3114            // Construct the `Box` from a dangling pointer to avoid calling
3115            // `Self::new_zeroed`. This ensures that stack space is never
3116            // allocated for `Self` even on lower opt-levels where this branch
3117            // might not get optimized out.
3118
3119            // SAFETY: Per [1], when `T` is a ZST, `Box<T>`'s only validity
3120            // requirements are that the pointer is non-null and sufficiently
3121            // aligned. Per [2], `NonNull::dangling` produces a pointer which
3122            // is sufficiently aligned. Since the produced pointer is a
3123            // `NonNull`, it is non-null.
3124            //
3125            // [1] Per https://doc.rust-lang.org/nightly/std/boxed/index.html#memory-layout:
3126            //
3127            //   For zero-sized values, the `Box` pointer has to be non-null and sufficiently aligned.
3128            //
3129            // [2] Per https://doc.rust-lang.org/std/ptr/struct.NonNull.html#method.dangling:
3130            //
3131            //   Creates a new `NonNull` that is dangling, but well-aligned.
3132            return Ok(unsafe { Box::from_raw(NonNull::dangling().as_ptr()) });
3133        }
3134
3135        // TODO(#429): Add a "SAFETY" comment and remove this `allow`.
3136        #[allow(clippy::undocumented_unsafe_blocks)]
3137        let ptr = unsafe { alloc::alloc::alloc_zeroed(layout).cast::<Self>() };
3138        if ptr.is_null() {
3139            return Err(AllocError);
3140        }
3141        // TODO(#429): Add a "SAFETY" comment and remove this `allow`.
3142        #[allow(clippy::undocumented_unsafe_blocks)]
3143        Ok(unsafe { Box::from_raw(ptr) })
3144    }
3145
3146    /// Creates a `Box<Self>` from zeroed bytes, with `count` trailing slice
3147    /// elements.
3148    ///
3149    /// This function is useful for allocating large values of `Self` on the
3150    /// heap and zero-initializing them, without ever creating a temporary
3151    /// instance of `Self` on the stack. For example,
3152    /// `<[u8]>::new_box_zeroed_with_elems(1048576)` will allocate the slice
3153    /// directly on the heap; it does not require storing the slice on the stack.
3154    ///
3155    /// On systems that use a heap implementation that supports allocating from
3156    /// pre-zeroed memory, using `new_box_zeroed_with_elems` may have performance
3157    /// benefits.
3158    ///
3159    /// If the trailing slice element of `Self` is a zero-sized type, then this
3160    /// function will return a `Box<Self>` that has the correct `len`. Such a box
3161    /// cannot contain any actual information, but its `len()` will be correct.
3162    ///
3163    /// # Errors
3164    ///
3165    /// Returns an error on allocation failure. Allocation failure is
3166    /// guaranteed never to cause a panic or an abort.
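    ///
    /// # Examples
    ///
    /// A minimal sketch (assuming the `alloc` feature is enabled) of allocating
    /// a zeroed boxed slice directly on the heap:
    ///
    /// ```
    /// # use zerocopy::FromZeros;
    /// let bytes = <[u16]>::new_box_zeroed_with_elems(8).unwrap();
    /// assert_eq!(bytes.len(), 8);
    /// assert!(bytes.iter().all(|&b| b == 0));
    /// ```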
3167    #[must_use = "has no side effects (other than allocation)"]
3168    #[cfg(feature = "alloc")]
3169    #[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))]
3170    #[inline]
3171    fn new_box_zeroed_with_elems(count: usize) -> Result<Box<Self>, AllocError>
3172    where
3173        Self: KnownLayout<PointerMetadata = usize>,
3174    {
3175        // SAFETY: `alloc::alloc::alloc_zeroed` is a valid argument of
3176        // `new_box`. The referent of the pointer returned by `alloc_zeroed`
3177        // (and, consequently, the `Box` derived from it) is a valid instance of
3178        // `Self`, because `Self` is `FromZeros`.
3179        unsafe { crate::util::new_box(count, alloc::alloc::alloc_zeroed) }
3180    }
3181
3182    #[deprecated(since = "0.8.0", note = "renamed to `FromZeros::new_box_zeroed_with_elems`")]
3183    #[doc(hidden)]
3184    #[cfg(feature = "alloc")]
3185    #[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))]
3186    #[must_use = "has no side effects (other than allocation)"]
3187    #[inline(always)]
3188    fn new_box_slice_zeroed(len: usize) -> Result<Box<[Self]>, AllocError>
3189    where
3190        Self: Sized,
3191    {
3192        <[Self]>::new_box_zeroed_with_elems(len)
3193    }
3194
3195    /// Creates a `Vec<Self>` from zeroed bytes.
3196    ///
3197    /// This function is useful for allocating large `Vec`s and
3198    /// zero-initializing them, without ever creating a temporary instance of
3199    /// `[Self; _]` (or many temporary instances of `Self`) on the stack. For
3200    /// example, `u8::new_vec_zeroed(1048576)` will allocate directly on the
3201    /// heap; it does not require storing intermediate values on the stack.
3202    ///
3203    /// On systems that use a heap implementation that supports allocating from
3204    /// pre-zeroed memory, using `new_vec_zeroed` may have performance benefits.
3205    ///
3206    /// If `Self` is a zero-sized type, then this function will return a
3207    /// `Vec<Self>` that has the correct `len`. Such a `Vec` cannot contain any
3208    /// actual information, but its `len()` property will report the correct
3209    /// value.
3210    ///
3211    /// # Errors
3212    ///
3213    /// Returns an error on allocation failure. Allocation failure is
3214    /// guaranteed never to cause a panic or an abort.
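    ///
    /// # Examples
    ///
    /// A minimal sketch (assuming the `alloc` feature is enabled):
    ///
    /// ```
    /// # use zerocopy::FromZeros;
    /// let v = u32::new_vec_zeroed(4).unwrap();
    /// assert_eq!(v, [0u32, 0, 0, 0]);
    /// ```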
3215    #[must_use = "has no side effects (other than allocation)"]
3216    #[cfg(feature = "alloc")]
3217    #[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))]
3218    #[inline(always)]
3219    fn new_vec_zeroed(len: usize) -> Result<Vec<Self>, AllocError>
3220    where
3221        Self: Sized,
3222    {
3223        <[Self]>::new_box_zeroed_with_elems(len).map(Into::into)
3224    }
3225
3226    /// Extends a `Vec<Self>` by pushing `additional` new items onto the end of
3227    /// the vector. The new items are initialized with zeros.
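    ///
    /// # Examples
    ///
    /// A short sketch (assuming the `alloc` feature is enabled and a toolchain
    /// on which `Vec::try_reserve` is stable):
    ///
    /// ```
    /// # use zerocopy::FromZeros;
    /// let mut v = vec![1u8, 2, 3];
    /// u8::extend_vec_zeroed(&mut v, 2).unwrap();
    /// assert_eq!(v, [1, 2, 3, 0, 0]);
    /// ```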
3228    #[cfg(zerocopy_panic_in_const_and_vec_try_reserve_1_57_0)]
3229    #[cfg(feature = "alloc")]
3230    #[cfg_attr(doc_cfg, doc(cfg(all(rust = "1.57.0", feature = "alloc"))))]
3231    #[inline(always)]
3232    fn extend_vec_zeroed(v: &mut Vec<Self>, additional: usize) -> Result<(), AllocError>
3233    where
3234        Self: Sized,
3235    {
3236        // PANICS: We pass `v.len()` for `position`, so the `position > v.len()`
3237        // panic condition is not satisfied.
3238        <Self as FromZeros>::insert_vec_zeroed(v, v.len(), additional)
3239    }
3240
3241    /// Inserts `additional` new items into `Vec<Self>` at `position`. The new
3242    /// items are initialized with zeros.
3243    ///
3244    /// # Panics
3245    ///
3246    /// Panics if `position > v.len()`.
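    ///
    /// # Examples
    ///
    /// A short sketch (assuming the `alloc` feature is enabled):
    ///
    /// ```
    /// # use zerocopy::FromZeros;
    /// let mut v = vec![1u8, 2, 3];
    /// // Insert two zeroed elements at index 1.
    /// u8::insert_vec_zeroed(&mut v, 1, 2).unwrap();
    /// assert_eq!(v, [1, 0, 0, 2, 3]);
    /// ```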
3247    #[cfg(zerocopy_panic_in_const_and_vec_try_reserve_1_57_0)]
3248    #[cfg(feature = "alloc")]
3249    #[cfg_attr(doc_cfg, doc(cfg(all(rust = "1.57.0", feature = "alloc"))))]
3250    #[inline]
3251    fn insert_vec_zeroed(
3252        v: &mut Vec<Self>,
3253        position: usize,
3254        additional: usize,
3255    ) -> Result<(), AllocError>
3256    where
3257        Self: Sized,
3258    {
3259        assert!(position <= v.len());
3260        // We only conditionally compile on versions on which `try_reserve` is
3261        // stable; the Clippy lint is a false positive.
3262        v.try_reserve(additional).map_err(|_| AllocError)?;
3263        // SAFETY: The `try_reserve` call guarantees that these cannot overflow:
3264        // * `ptr.add(position)`
3265        // * `position + additional`
3266        // * `v.len() + additional`
3267        //
3268        // `v.len() - position` cannot overflow because we asserted that
3269        // `position <= v.len()`.
3270        unsafe {
3271            // This is a potentially overlapping copy.
3272            let ptr = v.as_mut_ptr();
3273            #[allow(clippy::arithmetic_side_effects)]
3274            ptr.add(position).copy_to(ptr.add(position + additional), v.len() - position);
3275            ptr.add(position).write_bytes(0, additional);
3276            #[allow(clippy::arithmetic_side_effects)]
3277            v.set_len(v.len() + additional);
3278        }
3279
3280        Ok(())
3281    }
3282}
3283
3284/// Analyzes whether a type is [`FromBytes`].
3285///
3286/// This derive analyzes, at compile time, whether the annotated type satisfies
3287/// the [safety conditions] of `FromBytes` and implements `FromBytes` and its
3288/// supertraits if it is sound to do so. This derive can be applied to structs,
3289/// enums, and unions;
3290/// e.g.:
3291///
3292/// ```
3293/// # use zerocopy_derive::{FromBytes, FromZeros, Immutable};
3294/// #[derive(FromBytes)]
3295/// struct MyStruct {
3296/// # /*
3297///     ...
3298/// # */
3299/// }
3300///
3301/// #[derive(FromBytes)]
3302/// #[repr(u8)]
3303/// enum MyEnum {
3304/// #   V00, V01, V02, V03, V04, V05, V06, V07, V08, V09, V0A, V0B, V0C, V0D, V0E,
3305/// #   V0F, V10, V11, V12, V13, V14, V15, V16, V17, V18, V19, V1A, V1B, V1C, V1D,
3306/// #   V1E, V1F, V20, V21, V22, V23, V24, V25, V26, V27, V28, V29, V2A, V2B, V2C,
3307/// #   V2D, V2E, V2F, V30, V31, V32, V33, V34, V35, V36, V37, V38, V39, V3A, V3B,
3308/// #   V3C, V3D, V3E, V3F, V40, V41, V42, V43, V44, V45, V46, V47, V48, V49, V4A,
3309/// #   V4B, V4C, V4D, V4E, V4F, V50, V51, V52, V53, V54, V55, V56, V57, V58, V59,
3310/// #   V5A, V5B, V5C, V5D, V5E, V5F, V60, V61, V62, V63, V64, V65, V66, V67, V68,
3311/// #   V69, V6A, V6B, V6C, V6D, V6E, V6F, V70, V71, V72, V73, V74, V75, V76, V77,
3312/// #   V78, V79, V7A, V7B, V7C, V7D, V7E, V7F, V80, V81, V82, V83, V84, V85, V86,
3313/// #   V87, V88, V89, V8A, V8B, V8C, V8D, V8E, V8F, V90, V91, V92, V93, V94, V95,
3314/// #   V96, V97, V98, V99, V9A, V9B, V9C, V9D, V9E, V9F, VA0, VA1, VA2, VA3, VA4,
3315/// #   VA5, VA6, VA7, VA8, VA9, VAA, VAB, VAC, VAD, VAE, VAF, VB0, VB1, VB2, VB3,
3316/// #   VB4, VB5, VB6, VB7, VB8, VB9, VBA, VBB, VBC, VBD, VBE, VBF, VC0, VC1, VC2,
3317/// #   VC3, VC4, VC5, VC6, VC7, VC8, VC9, VCA, VCB, VCC, VCD, VCE, VCF, VD0, VD1,
3318/// #   VD2, VD3, VD4, VD5, VD6, VD7, VD8, VD9, VDA, VDB, VDC, VDD, VDE, VDF, VE0,
3319/// #   VE1, VE2, VE3, VE4, VE5, VE6, VE7, VE8, VE9, VEA, VEB, VEC, VED, VEE, VEF,
3320/// #   VF0, VF1, VF2, VF3, VF4, VF5, VF6, VF7, VF8, VF9, VFA, VFB, VFC, VFD, VFE,
3321/// #   VFF,
3322/// # /*
3323///     ...
3324/// # */
3325/// }
3326///
3327/// #[derive(FromBytes, Immutable)]
3328/// union MyUnion {
3329/// #   variant: u8,
3330/// # /*
3331///     ...
3332/// # */
3333/// }
3334/// ```
3335///
3336/// [safety conditions]: trait@FromBytes#safety
3337///
3338/// # Analysis
3339///
3340/// *This section describes, roughly, the analysis performed by this derive to
3341/// determine whether it is sound to implement `FromBytes` for a given type.
3342/// Unless you are modifying the implementation of this derive, or attempting to
3343/// manually implement `FromBytes` for a type yourself, you don't need to read
3344/// this section.*
3345///
3346/// If a type has the following properties, then this derive can implement
3347/// `FromBytes` for that type:
3348///
3349/// - If the type is a struct, all of its fields must be `FromBytes`.
3350/// - If the type is an enum:
3351///   - It must have a defined representation (`repr`s `C`, `u8`, `u16`, `u32`,
3352///     `u64`, `usize`, `i8`, `i16`, `i32`, `i64`, or `isize`).
3353///   - The maximum number of discriminants must be used (so that every possible
3354///     bit pattern is a valid one). Be very careful when using the `C`,
3355///     `usize`, or `isize` representations, as their size is
3356///     platform-dependent.
3357///   - Its fields must be `FromBytes`.
3358///
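/// For instance (an illustrative sketch, not an exhaustive statement of the
/// analysis), a struct of integer fields satisfies these properties, while a
/// struct containing a `bool` does not, because only `0x00` and `0x01` are
/// valid `bool`s:
///
/// ```compile_fail
/// # use zerocopy_derive::FromBytes;
/// #[derive(FromBytes)]
/// struct Qualifies {
///     a: u32,
///     b: [u8; 4],
/// }
///
/// #[derive(FromBytes)]
/// struct DoesNotQualify {
///     flag: bool, // Error: `bool` is not `FromBytes`
/// }
/// ```
///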
3359/// This analysis is subject to change. Unsafe code may *only* rely on the
3360/// documented [safety conditions] of `FromBytes`, and must *not* rely on the
3361/// implementation details of this derive.
3362///
3363/// ## Why isn't an explicit representation required for structs?
3364///
3365/// Neither this derive, nor the [safety conditions] of `FromBytes`, requires
3366/// that structs are marked with `#[repr(C)]`.
3367///
3368/// Per the [Rust reference][reference],
3369///
3370/// > The representation of a type can change the padding between fields, but
3371/// > does not change the layout of the fields themselves.
3372///
3373/// [reference]: https://doc.rust-lang.org/reference/type-layout.html#representations
3374///
3375/// Since the layout of structs only consists of padding bytes and field bytes,
3376/// a struct is soundly `FromBytes` if:
3377/// 1. its padding is soundly `FromBytes`, and
3378/// 2. its fields are soundly `FromBytes`.
3379///
3380/// The first condition is always satisfied: padding bytes do not have any
3381/// validity constraints. A [discussion] of this question in the Unsafe Code
3382/// Guidelines Working Group concluded that it would be virtually unimaginable
3383/// for future versions of rustc to add validity constraints to padding bytes.
3384///
3385/// [discussion]: https://github.com/rust-lang/unsafe-code-guidelines/issues/174
3386///
3387/// Whether a struct is soundly `FromBytes` therefore solely depends on whether
3388/// its fields are `FromBytes`.
3389// TODO(#146): Document why we don't require an enum to have an explicit `repr`
3390// attribute.
3391#[cfg(any(feature = "derive", test))]
3392#[cfg_attr(doc_cfg, doc(cfg(feature = "derive")))]
3393pub use zerocopy_derive::FromBytes;
3394
3395/// Types for which any bit pattern is valid.
3396///
3397/// Any memory region of the appropriate length which contains initialized bytes
3398/// can be viewed as any `FromBytes` type with no runtime overhead. This is
3399/// useful for efficiently parsing bytes as structured data.
3400///
3401/// # Warning: Padding bytes
3402///
3403/// Note that, when a value is moved or copied, only the non-padding bytes of
3404/// that value are guaranteed to be preserved. It is unsound to assume that
3405/// values written to padding bytes are preserved after a move or copy. For
3406/// example, the following is unsound:
3407///
3408/// ```rust,no_run
3409/// use core::mem::{size_of, transmute};
3410/// use zerocopy::FromZeros;
3411/// # use zerocopy_derive::*;
3412///
3413/// // Assume `Foo` is a type with padding bytes.
3414/// #[derive(FromZeros, Default)]
3415/// struct Foo {
3416/// # /*
3417///     ...
3418/// # */
3419/// }
3420///
3421/// let mut foo: Foo = Foo::default();
3422/// FromZeros::zero(&mut foo);
3423/// // UNSOUND: Although `FromZeros::zero` writes zeros to all bytes of `foo`,
3424/// // those writes are not guaranteed to be preserved in padding bytes when
3425/// // `foo` is moved, so this may expose padding bytes as `u8`s.
3426/// let foo_bytes: [u8; size_of::<Foo>()] = unsafe { transmute(foo) };
3427/// ```
3428///
3429/// # Implementation
3430///
3431/// **Do not implement this trait yourself!** Instead, use
3432/// [`#[derive(FromBytes)]`][derive]; e.g.:
3433///
3434/// ```
3435/// # use zerocopy_derive::{FromBytes, Immutable};
3436/// #[derive(FromBytes)]
3437/// struct MyStruct {
3438/// # /*
3439///     ...
3440/// # */
3441/// }
3442///
3443/// #[derive(FromBytes)]
3444/// #[repr(u8)]
3445/// enum MyEnum {
3446/// #   V00, V01, V02, V03, V04, V05, V06, V07, V08, V09, V0A, V0B, V0C, V0D, V0E,
3447/// #   V0F, V10, V11, V12, V13, V14, V15, V16, V17, V18, V19, V1A, V1B, V1C, V1D,
3448/// #   V1E, V1F, V20, V21, V22, V23, V24, V25, V26, V27, V28, V29, V2A, V2B, V2C,
3449/// #   V2D, V2E, V2F, V30, V31, V32, V33, V34, V35, V36, V37, V38, V39, V3A, V3B,
3450/// #   V3C, V3D, V3E, V3F, V40, V41, V42, V43, V44, V45, V46, V47, V48, V49, V4A,
3451/// #   V4B, V4C, V4D, V4E, V4F, V50, V51, V52, V53, V54, V55, V56, V57, V58, V59,
3452/// #   V5A, V5B, V5C, V5D, V5E, V5F, V60, V61, V62, V63, V64, V65, V66, V67, V68,
3453/// #   V69, V6A, V6B, V6C, V6D, V6E, V6F, V70, V71, V72, V73, V74, V75, V76, V77,
3454/// #   V78, V79, V7A, V7B, V7C, V7D, V7E, V7F, V80, V81, V82, V83, V84, V85, V86,
3455/// #   V87, V88, V89, V8A, V8B, V8C, V8D, V8E, V8F, V90, V91, V92, V93, V94, V95,
3456/// #   V96, V97, V98, V99, V9A, V9B, V9C, V9D, V9E, V9F, VA0, VA1, VA2, VA3, VA4,
3457/// #   VA5, VA6, VA7, VA8, VA9, VAA, VAB, VAC, VAD, VAE, VAF, VB0, VB1, VB2, VB3,
3458/// #   VB4, VB5, VB6, VB7, VB8, VB9, VBA, VBB, VBC, VBD, VBE, VBF, VC0, VC1, VC2,
3459/// #   VC3, VC4, VC5, VC6, VC7, VC8, VC9, VCA, VCB, VCC, VCD, VCE, VCF, VD0, VD1,
3460/// #   VD2, VD3, VD4, VD5, VD6, VD7, VD8, VD9, VDA, VDB, VDC, VDD, VDE, VDF, VE0,
3461/// #   VE1, VE2, VE3, VE4, VE5, VE6, VE7, VE8, VE9, VEA, VEB, VEC, VED, VEE, VEF,
3462/// #   VF0, VF1, VF2, VF3, VF4, VF5, VF6, VF7, VF8, VF9, VFA, VFB, VFC, VFD, VFE,
3463/// #   VFF,
3464/// # /*
3465///     ...
3466/// # */
3467/// }
3468///
3469/// #[derive(FromBytes, Immutable)]
3470/// union MyUnion {
3471/// #   variant: u8,
3472/// # /*
3473///     ...
3474/// # */
3475/// }
3476/// ```
3477///
3478/// This derive performs a sophisticated, compile-time safety analysis to
3479/// determine whether a type is `FromBytes`.
3480///
3481/// # Safety
3482///
3483/// *This section describes what is required in order for `T: FromBytes`, and
3484/// what unsafe code may assume of such types. If you don't plan on implementing
3485/// `FromBytes` manually, and you don't plan on writing unsafe code that
3486/// operates on `FromBytes` types, then you don't need to read this section.*
3487///
3488/// If `T: FromBytes`, then unsafe code may assume that it is sound to produce a
3489/// `T` whose bytes are initialized to any sequence of valid `u8`s (in other
3490/// words, any byte value which is not uninitialized). If a type is marked as
3491/// `FromBytes` which violates this contract, it may cause undefined behavior.
3492///
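/// For example, unsafe code may rely on this guarantee along the following
/// lines (a minimal illustrative sketch; the helper `read_arbitrary` is not
/// part of zerocopy's API):
///
/// ```
/// use zerocopy::FromBytes;
///
/// fn read_arbitrary<T: FromBytes>(bytes: &[u8]) -> T {
///     assert_eq!(bytes.len(), core::mem::size_of::<T>());
///     // SAFETY: The assertion above guarantees that `bytes` is valid for
///     // reads of `size_of::<T>()` bytes, and `read_unaligned` imposes no
///     // alignment requirement. `T: FromBytes` guarantees that any sequence
///     // of initialized bytes is a valid `T`.
///     unsafe { core::ptr::read_unaligned(bytes.as_ptr().cast::<T>()) }
/// }
///
/// assert_eq!(read_arbitrary::<u32>(&[1, 2, 3, 4]), u32::from_le_bytes([1, 2, 3, 4]));
/// ```
///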
3493/// `#[derive(FromBytes)]` only permits [types which satisfy these
3494/// requirements][derive-analysis].
3495///
3496#[cfg_attr(
3497    feature = "derive",
3498    doc = "[derive]: zerocopy_derive::FromBytes",
3499    doc = "[derive-analysis]: zerocopy_derive::FromBytes#analysis"
3500)]
3501#[cfg_attr(
3502    not(feature = "derive"),
3503    doc = concat!("[derive]: https://docs.rs/zerocopy/", env!("CARGO_PKG_VERSION"), "/zerocopy/derive.FromBytes.html"),
3504    doc = concat!("[derive-analysis]: https://docs.rs/zerocopy/", env!("CARGO_PKG_VERSION"), "/zerocopy/derive.FromBytes.html#analysis"),
3505)]
3506#[cfg_attr(
3507    zerocopy_diagnostic_on_unimplemented_1_78_0,
3508    diagnostic::on_unimplemented(note = "Consider adding `#[derive(FromBytes)]` to `{Self}`")
3509)]
3510pub unsafe trait FromBytes: FromZeros {
3511    // The `Self: Sized` bound makes it so that `FromBytes` is still object
3512    // safe.
3513    #[doc(hidden)]
3514    fn only_derive_is_allowed_to_implement_this_trait()
3515    where
3516        Self: Sized;
3517
3518    /// Interprets the given `source` as a `&Self`.
3519    ///
3520    /// This method attempts to return a reference to `source` interpreted as a
3521    /// `Self`. If the length of `source` is not a [valid size of
3522    /// `Self`][valid-size], or if `source` is not appropriately aligned, this
3523    /// returns `Err`. If [`Self: Unaligned`][self-unaligned], you can
3524    /// [infallibly discard the alignment error][size-error-from].
3525    ///
3526    /// `Self` may be a sized type, a slice, or a [slice DST][slice-dst].
3527    ///
3528    /// [valid-size]: crate::KnownLayout#what-is-a-valid-size
3529    /// [self-unaligned]: Unaligned
3530    /// [size-error-from]: error/struct.SizeError.html#method.from-1
3531    /// [slice-dst]: KnownLayout#dynamically-sized-types
3532    ///
3533    /// # Compile-Time Assertions
3534    ///
3535    /// This method cannot yet be used on unsized types whose dynamically-sized
3536    /// component is zero-sized. Attempting to use this method on such types
3537    /// results in a compile-time assertion error; e.g.:
3538    ///
3539    /// ```compile_fail,E0080
3540    /// use zerocopy::*;
3541    /// # use zerocopy_derive::*;
3542    ///
3543    /// #[derive(FromBytes, Immutable, KnownLayout)]
3544    /// #[repr(C)]
3545    /// struct ZSTy {
3546    ///     leading_sized: u16,
3547    ///     trailing_dst: [()],
3548    /// }
3549    ///
3550    /// let _ = ZSTy::ref_from_bytes(0u16.as_bytes()); // ⚠️ Compile Error!
3551    /// ```
3552    ///
3553    /// # Examples
3554    ///
3555    /// ```
3556    /// use zerocopy::FromBytes;
3557    /// # use zerocopy_derive::*;
3558    ///
3559    /// #[derive(FromBytes, KnownLayout, Immutable)]
3560    /// #[repr(C)]
3561    /// struct PacketHeader {
3562    ///     src_port: [u8; 2],
3563    ///     dst_port: [u8; 2],
3564    ///     length: [u8; 2],
3565    ///     checksum: [u8; 2],
3566    /// }
3567    ///
3568    /// #[derive(FromBytes, KnownLayout, Immutable)]
3569    /// #[repr(C)]
3570    /// struct Packet {
3571    ///     header: PacketHeader,
3572    ///     body: [u8],
3573    /// }
3574    ///
3575    /// // These bytes encode a `Packet`.
3576    /// let bytes = &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11][..];
3577    ///
3578    /// let packet = Packet::ref_from_bytes(bytes).unwrap();
3579    ///
3580    /// assert_eq!(packet.header.src_port, [0, 1]);
3581    /// assert_eq!(packet.header.dst_port, [2, 3]);
3582    /// assert_eq!(packet.header.length, [4, 5]);
3583    /// assert_eq!(packet.header.checksum, [6, 7]);
3584    /// assert_eq!(packet.body, [8, 9, 10, 11]);
3585    /// ```
3586    #[must_use = "has no side effects"]
3587    #[inline]
3588    fn ref_from_bytes(source: &[u8]) -> Result<&Self, CastError<&[u8], Self>>
3589    where
3590        Self: KnownLayout + Immutable,
3591    {
3592        static_assert_dst_is_not_zst!(Self);
3593        match Ptr::from_ref(source).try_cast_into_no_leftover::<_, BecauseImmutable>(None) {
3594            Ok(ptr) => Ok(ptr.recall_validity().as_ref()),
3595            Err(err) => Err(err.map_src(|src| src.as_ref())),
3596        }
3597    }
3598
3599    /// Interprets the prefix of the given `source` as a `&Self` without
3600    /// copying.
3601    ///
3602    /// This method computes the [largest possible size of `Self`][valid-size]
3603    /// that can fit in the leading bytes of `source`, then attempts to return
3604    /// both a reference to those bytes interpreted as a `Self`, and a reference
3605    /// to the remaining bytes. If there are insufficient bytes, or if `source`
3606    /// is not appropriately aligned, this returns `Err`. If [`Self:
3607    /// Unaligned`][self-unaligned], you can [infallibly discard the alignment
3608    /// error][size-error-from].
3609    ///
3610    /// `Self` may be a sized type, a slice, or a [slice DST][slice-dst].
3611    ///
3612    /// [valid-size]: crate::KnownLayout#what-is-a-valid-size
3613    /// [self-unaligned]: Unaligned
3614    /// [size-error-from]: error/struct.SizeError.html#method.from-1
3615    /// [slice-dst]: KnownLayout#dynamically-sized-types
3616    ///
3617    /// # Compile-Time Assertions
3618    ///
3619    /// This method cannot yet be used on unsized types whose dynamically-sized
3620    /// component is zero-sized. See [`ref_from_prefix_with_elems`], which does
3621    /// support such types. Attempting to use this method on such types results
3622    /// in a compile-time assertion error; e.g.:
3623    ///
3624    /// ```compile_fail,E0080
3625    /// use zerocopy::*;
3626    /// # use zerocopy_derive::*;
3627    ///
3628    /// #[derive(FromBytes, Immutable, KnownLayout)]
3629    /// #[repr(C)]
3630    /// struct ZSTy {
3631    ///     leading_sized: u16,
3632    ///     trailing_dst: [()],
3633    /// }
3634    ///
3635    /// let _ = ZSTy::ref_from_prefix(0u16.as_bytes()); // ⚠️ Compile Error!
3636    /// ```
3637    ///
3638    /// [`ref_from_prefix_with_elems`]: FromBytes::ref_from_prefix_with_elems
3639    ///
3640    /// # Examples
3641    ///
3642    /// ```
3643    /// use zerocopy::FromBytes;
3644    /// # use zerocopy_derive::*;
3645    ///
3646    /// #[derive(FromBytes, KnownLayout, Immutable)]
3647    /// #[repr(C)]
3648    /// struct PacketHeader {
3649    ///     src_port: [u8; 2],
3650    ///     dst_port: [u8; 2],
3651    ///     length: [u8; 2],
3652    ///     checksum: [u8; 2],
3653    /// }
3654    ///
3655    /// #[derive(FromBytes, KnownLayout, Immutable)]
3656    /// #[repr(C)]
3657    /// struct Packet {
3658    ///     header: PacketHeader,
3659    ///     body: [[u8; 2]],
3660    /// }
3661    ///
3662    /// // These are more bytes than are needed to encode a `Packet`.
3663    /// let bytes = &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14][..];
3664    ///
3665    /// let (packet, suffix) = Packet::ref_from_prefix(bytes).unwrap();
3666    ///
3667    /// assert_eq!(packet.header.src_port, [0, 1]);
3668    /// assert_eq!(packet.header.dst_port, [2, 3]);
3669    /// assert_eq!(packet.header.length, [4, 5]);
3670    /// assert_eq!(packet.header.checksum, [6, 7]);
3671    /// assert_eq!(packet.body, [[8, 9], [10, 11], [12, 13]]);
3672    /// assert_eq!(suffix, &[14u8][..]);
3673    /// ```
3674    #[must_use = "has no side effects"]
3675    #[inline]
3676    fn ref_from_prefix(source: &[u8]) -> Result<(&Self, &[u8]), CastError<&[u8], Self>>
3677    where
3678        Self: KnownLayout + Immutable,
3679    {
3680        static_assert_dst_is_not_zst!(Self);
3681        ref_from_prefix_suffix(source, None, CastType::Prefix)
3682    }
3683
3684    /// Interprets the suffix of the given `source` as a `&Self` without copying.
3685    ///
3686    /// This method computes the [largest possible size of `Self`][valid-size]
3687    /// that can fit in the trailing bytes of `source`, then attempts to return
3688    /// both a reference to those bytes interpreted as a `Self`, and a reference
3689    /// to the preceding bytes. If there are insufficient bytes, or if that
3690    /// suffix of `source` is not appropriately aligned, this returns `Err`. If
3691    /// [`Self: Unaligned`][self-unaligned], you can [infallibly discard the
3692    /// alignment error][size-error-from].
3693    ///
3694    /// `Self` may be a sized type, a slice, or a [slice DST][slice-dst].
3695    ///
3696    /// [valid-size]: crate::KnownLayout#what-is-a-valid-size
3697    /// [self-unaligned]: Unaligned
3698    /// [size-error-from]: error/struct.SizeError.html#method.from-1
3699    /// [slice-dst]: KnownLayout#dynamically-sized-types
3700    ///
3701    /// # Compile-Time Assertions
3702    ///
3703    /// This method cannot yet be used on unsized types whose dynamically-sized
3704    /// component is zero-sized. See [`ref_from_suffix_with_elems`], which does
3705    /// support such types. Attempting to use this method on such types results
3706    /// in a compile-time assertion error; e.g.:
3707    ///
3708    /// ```compile_fail,E0080
3709    /// use zerocopy::*;
3710    /// # use zerocopy_derive::*;
3711    ///
3712    /// #[derive(FromBytes, Immutable, KnownLayout)]
3713    /// #[repr(C)]
3714    /// struct ZSTy {
3715    ///     leading_sized: u16,
3716    ///     trailing_dst: [()],
3717    /// }
3718    ///
3719    /// let _ = ZSTy::ref_from_suffix(0u16.as_bytes()); // ⚠️ Compile Error!
3720    /// ```
3721    ///
3722    /// [`ref_from_suffix_with_elems`]: FromBytes::ref_from_suffix_with_elems
3723    ///
3724    /// # Examples
3725    ///
3726    /// ```
3727    /// use zerocopy::FromBytes;
3728    /// # use zerocopy_derive::*;
3729    ///
3730    /// #[derive(FromBytes, Immutable, KnownLayout)]
3731    /// #[repr(C)]
3732    /// struct PacketTrailer {
3733    ///     frame_check_sequence: [u8; 4],
3734    /// }
3735    ///
3736    /// // These are more bytes than are needed to encode a `PacketTrailer`.
3737    /// let bytes = &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9][..];
3738    ///
3739    /// let (prefix, trailer) = PacketTrailer::ref_from_suffix(bytes).unwrap();
3740    ///
3741    /// assert_eq!(prefix, &[0, 1, 2, 3, 4, 5][..]);
3742    /// assert_eq!(trailer.frame_check_sequence, [6, 7, 8, 9]);
3743    /// ```
3744    #[must_use = "has no side effects"]
3745    #[inline]
3746    fn ref_from_suffix(source: &[u8]) -> Result<(&[u8], &Self), CastError<&[u8], Self>>
3747    where
3748        Self: Immutable + KnownLayout,
3749    {
3750        static_assert_dst_is_not_zst!(Self);
3751        ref_from_prefix_suffix(source, None, CastType::Suffix).map(swap)
3752    }
3753
3754    /// Interprets the given `source` as a `&mut Self`.
3755    ///
3756    /// This method attempts to return a reference to `source` interpreted as a
3757    /// `Self`. If the length of `source` is not a [valid size of
3758    /// `Self`][valid-size], or if `source` is not appropriately aligned, this
3759    /// returns `Err`. If [`Self: Unaligned`][self-unaligned], you can
3760    /// [infallibly discard the alignment error][size-error-from].
3761    ///
3762    /// `Self` may be a sized type, a slice, or a [slice DST][slice-dst].
3763    ///
3764    /// [valid-size]: crate::KnownLayout#what-is-a-valid-size
3765    /// [self-unaligned]: Unaligned
3766    /// [size-error-from]: error/struct.SizeError.html#method.from-1
3767    /// [slice-dst]: KnownLayout#dynamically-sized-types
3768    ///
3769    /// # Compile-Time Assertions
3770    ///
3771    /// This method cannot yet be used on unsized types whose dynamically-sized
3772    /// component is zero-sized. See [`mut_from_prefix_with_elems`], which does
3773    /// support such types. Attempting to use this method on such types results
3774    /// in a compile-time assertion error; e.g.:
3775    ///
3776    /// ```compile_fail,E0080
3777    /// use zerocopy::*;
3778    /// # use zerocopy_derive::*;
3779    ///
3780    /// #[derive(FromBytes, Immutable, IntoBytes, KnownLayout)]
3781    /// #[repr(C, packed)]
3782    /// struct ZSTy {
3783    ///     leading_sized: [u8; 2],
3784    ///     trailing_dst: [()],
3785    /// }
3786    ///
3787    /// let mut source = [85, 85];
3788    /// let _ = ZSTy::mut_from_bytes(&mut source[..]); // ⚠️ Compile Error!
3789    /// ```
3790    ///
3791    /// [`mut_from_prefix_with_elems`]: FromBytes::mut_from_prefix_with_elems
3792    ///
3793    /// # Examples
3794    ///
3795    /// ```
3796    /// use zerocopy::FromBytes;
3797    /// # use zerocopy_derive::*;
3798    ///
3799    /// #[derive(FromBytes, IntoBytes, KnownLayout, Immutable)]
3800    /// #[repr(C)]
3801    /// struct PacketHeader {
3802    ///     src_port: [u8; 2],
3803    ///     dst_port: [u8; 2],
3804    ///     length: [u8; 2],
3805    ///     checksum: [u8; 2],
3806    /// }
3807    ///
3808    /// // These bytes encode a `PacketHeader`.
3809    /// let bytes = &mut [0, 1, 2, 3, 4, 5, 6, 7][..];
3810    ///
3811    /// let header = PacketHeader::mut_from_bytes(bytes).unwrap();
3812    ///
3813    /// assert_eq!(header.src_port, [0, 1]);
3814    /// assert_eq!(header.dst_port, [2, 3]);
3815    /// assert_eq!(header.length, [4, 5]);
3816    /// assert_eq!(header.checksum, [6, 7]);
3817    ///
3818    /// header.checksum = [0, 0];
3819    ///
3820    /// assert_eq!(bytes, [0, 1, 2, 3, 4, 5, 0, 0]);
3821    /// ```
3822    #[must_use = "has no side effects"]
3823    #[inline]
3824    fn mut_from_bytes(source: &mut [u8]) -> Result<&mut Self, CastError<&mut [u8], Self>>
3825    where
3826        Self: IntoBytes + KnownLayout,
3827    {
3828        static_assert_dst_is_not_zst!(Self);
3829        match Ptr::from_mut(source).try_cast_into_no_leftover::<_, BecauseExclusive>(None) {
3830            Ok(ptr) => Ok(ptr.recall_validity().as_mut()),
3831            Err(err) => Err(err.map_src(|src| src.as_mut())),
3832        }
3833    }
3834
3835    /// Interprets the prefix of the given `source` as a `&mut Self` without
3836    /// copying.
3837    ///
3838    /// This method computes the [largest possible size of `Self`][valid-size]
3839    /// that can fit in the leading bytes of `source`, then attempts to return
3840    /// both a reference to those bytes interpreted as a `Self`, and a reference
3841    /// to the remaining bytes. If there are insufficient bytes, or if `source`
3842    /// is not appropriately aligned, this returns `Err`. If [`Self:
3843    /// Unaligned`][self-unaligned], you can [infallibly discard the alignment
3844    /// error][size-error-from].
3845    ///
3846    /// `Self` may be a sized type, a slice, or a [slice DST][slice-dst].
3847    ///
3848    /// [valid-size]: crate::KnownLayout#what-is-a-valid-size
3849    /// [self-unaligned]: Unaligned
3850    /// [size-error-from]: error/struct.SizeError.html#method.from-1
3851    /// [slice-dst]: KnownLayout#dynamically-sized-types
3852    ///
3853    /// # Compile-Time Assertions
3854    ///
3855    /// This method cannot yet be used on unsized types whose dynamically-sized
3856    /// component is zero-sized. See [`mut_from_suffix_with_elems`], which does
3857    /// support such types. Attempting to use this method on such types results
3858    /// in a compile-time assertion error; e.g.:
3859    ///
3860    /// ```compile_fail,E0080
3861    /// use zerocopy::*;
3862    /// # use zerocopy_derive::*;
3863    ///
3864    /// #[derive(FromBytes, Immutable, IntoBytes, KnownLayout)]
3865    /// #[repr(C, packed)]
3866    /// struct ZSTy {
3867    ///     leading_sized: [u8; 2],
3868    ///     trailing_dst: [()],
3869    /// }
3870    ///
3871    /// let mut source = [85, 85];
3872    /// let _ = ZSTy::mut_from_prefix(&mut source[..]); // ⚠️ Compile Error!
3873    /// ```
3874    ///
3875    /// [`mut_from_suffix_with_elems`]: FromBytes::mut_from_suffix_with_elems
3876    ///
3877    /// # Examples
3878    ///
3879    /// ```
3880    /// use zerocopy::FromBytes;
3881    /// # use zerocopy_derive::*;
3882    ///
3883    /// #[derive(FromBytes, IntoBytes, KnownLayout, Immutable)]
3884    /// #[repr(C)]
3885    /// struct PacketHeader {
3886    ///     src_port: [u8; 2],
3887    ///     dst_port: [u8; 2],
3888    ///     length: [u8; 2],
3889    ///     checksum: [u8; 2],
3890    /// }
3891    ///
3892    /// // These are more bytes than are needed to encode a `PacketHeader`.
3893    /// let bytes = &mut [0, 1, 2, 3, 4, 5, 6, 7, 8, 9][..];
3894    ///
3895    /// let (header, body) = PacketHeader::mut_from_prefix(bytes).unwrap();
3896    ///
3897    /// assert_eq!(header.src_port, [0, 1]);
3898    /// assert_eq!(header.dst_port, [2, 3]);
3899    /// assert_eq!(header.length, [4, 5]);
3900    /// assert_eq!(header.checksum, [6, 7]);
3901    /// assert_eq!(body, &[8, 9][..]);
3902    ///
3903    /// header.checksum = [0, 0];
3904    /// body.fill(1);
3905    ///
3906    /// assert_eq!(bytes, [0, 1, 2, 3, 4, 5, 0, 0, 1, 1]);
3907    /// ```
3908    #[must_use = "has no side effects"]
3909    #[inline]
3910    fn mut_from_prefix(
3911        source: &mut [u8],
3912    ) -> Result<(&mut Self, &mut [u8]), CastError<&mut [u8], Self>>
3913    where
3914        Self: IntoBytes + KnownLayout,
3915    {
3916        static_assert_dst_is_not_zst!(Self);
3917        mut_from_prefix_suffix(source, None, CastType::Prefix)
3918    }
3919
3920    /// Interprets the suffix of the given `source` as a `&mut Self` without
3921    /// copying.
3922    ///
3923    /// This method computes the [largest possible size of `Self`][valid-size]
3924    /// that can fit in the trailing bytes of `source`, then attempts to return
3925    /// both a reference to those bytes interpreted as a `Self`, and a reference
3926    /// to the preceding bytes. If there are insufficient bytes, or if that
3927    /// suffix of `source` is not appropriately aligned, this returns `Err`. If
3928    /// [`Self: Unaligned`][self-unaligned], you can [infallibly discard the
3929    /// alignment error][size-error-from].
3930    ///
3931    /// `Self` may be a sized type, a slice, or a [slice DST][slice-dst].
3932    ///
3933    /// [valid-size]: crate::KnownLayout#what-is-a-valid-size
3934    /// [self-unaligned]: Unaligned
3935    /// [size-error-from]: error/struct.SizeError.html#method.from-1
3936    /// [slice-dst]: KnownLayout#dynamically-sized-types
3937    ///
3938    /// # Compile-Time Assertions
3939    ///
3940    /// This method cannot yet be used on unsized types whose dynamically-sized
3941    /// component is zero-sized. Attempting to use this method on such types
3942    /// results in a compile-time assertion error; e.g.:
3943    ///
3944    /// ```compile_fail,E0080
3945    /// use zerocopy::*;
3946    /// # use zerocopy_derive::*;
3947    ///
3948    /// #[derive(FromBytes, Immutable, IntoBytes, KnownLayout)]
3949    /// #[repr(C, packed)]
3950    /// struct ZSTy {
3951    ///     leading_sized: [u8; 2],
3952    ///     trailing_dst: [()],
3953    /// }
3954    ///
3955    /// let mut source = [85, 85];
3956    /// let _ = ZSTy::mut_from_suffix(&mut source[..]); // ⚠️ Compile Error!
3957    /// ```
3958    ///
3959    /// # Examples
3960    ///
3961    /// ```
3962    /// use zerocopy::FromBytes;
3963    /// # use zerocopy_derive::*;
3964    ///
3965    /// #[derive(FromBytes, IntoBytes, KnownLayout, Immutable)]
3966    /// #[repr(C)]
3967    /// struct PacketTrailer {
3968    ///     frame_check_sequence: [u8; 4],
3969    /// }
3970    ///
3971    /// // These are more bytes than are needed to encode a `PacketTrailer`.
3972    /// let bytes = &mut [0, 1, 2, 3, 4, 5, 6, 7, 8, 9][..];
3973    ///
3974    /// let (prefix, trailer) = PacketTrailer::mut_from_suffix(bytes).unwrap();
3975    ///
3976    /// assert_eq!(prefix, &[0u8, 1, 2, 3, 4, 5][..]);
3977    /// assert_eq!(trailer.frame_check_sequence, [6, 7, 8, 9]);
3978    ///
3979    /// prefix.fill(0);
3980    /// trailer.frame_check_sequence.fill(1);
3981    ///
3982    /// assert_eq!(bytes, [0, 0, 0, 0, 0, 0, 1, 1, 1, 1]);
3983    /// ```
3984    #[must_use = "has no side effects"]
3985    #[inline]
3986    fn mut_from_suffix(
3987        source: &mut [u8],
3988    ) -> Result<(&mut [u8], &mut Self), CastError<&mut [u8], Self>>
3989    where
3990        Self: IntoBytes + KnownLayout,
3991    {
3992        static_assert_dst_is_not_zst!(Self);
3993        mut_from_prefix_suffix(source, None, CastType::Suffix).map(swap)
3994    }
3995
3996    /// Interprets the given `source` as a `&Self` with a DST length equal to
3997    /// `count`.
3998    ///
3999    /// This method attempts to return a reference to `source` interpreted as a
4000    /// `Self` with `count` trailing elements. If the length of `source` is not
4001    /// equal to the size of `Self` with `count` elements, or if `source` is not
4002    /// appropriately aligned, this returns `Err`. If [`Self:
4003    /// Unaligned`][self-unaligned], you can [infallibly discard the alignment
4004    /// error][size-error-from].
4005    ///
4006    /// [self-unaligned]: Unaligned
4007    /// [size-error-from]: error/struct.SizeError.html#method.from-1
4008    ///
4009    /// # Examples
4010    ///
4011    /// ```
4012    /// use zerocopy::FromBytes;
4013    /// # use zerocopy_derive::*;
4014    ///
4015    /// # #[derive(Debug, PartialEq, Eq)]
4016    /// #[derive(FromBytes, Immutable)]
4017    /// #[repr(C)]
4018    /// struct Pixel {
4019    ///     r: u8,
4020    ///     g: u8,
4021    ///     b: u8,
4022    ///     a: u8,
4023    /// }
4024    ///
4025    /// let bytes = &[0, 1, 2, 3, 4, 5, 6, 7][..];
4026    ///
4027    /// let pixels = <[Pixel]>::ref_from_bytes_with_elems(bytes, 2).unwrap();
4028    ///
4029    /// assert_eq!(pixels, &[
4030    ///     Pixel { r: 0, g: 1, b: 2, a: 3 },
4031    ///     Pixel { r: 4, g: 5, b: 6, a: 7 },
4032    /// ]);
4034    /// ```
4035    ///
4036    /// Since an explicit `count` is provided, this method supports types with
4037    /// zero-sized trailing slice elements. Methods such as [`ref_from_bytes`]
4038    /// which do not take an explicit count do not support such types.
4039    ///
4040    /// ```
4041    /// use zerocopy::*;
4042    /// # use zerocopy_derive::*;
4043    ///
4044    /// #[derive(FromBytes, Immutable, KnownLayout)]
4045    /// #[repr(C)]
4046    /// struct ZSTy {
4047    ///     leading_sized: [u8; 2],
4048    ///     trailing_dst: [()],
4049    /// }
4050    ///
4051    /// let src = &[85, 85][..];
4052    /// let zsty = ZSTy::ref_from_bytes_with_elems(src, 42).unwrap();
4053    /// assert_eq!(zsty.trailing_dst.len(), 42);
4054    /// ```
4055    ///
4056    /// [`ref_from_bytes`]: FromBytes::ref_from_bytes
4057    #[must_use = "has no side effects"]
4058    #[inline]
4059    fn ref_from_bytes_with_elems(
4060        source: &[u8],
4061        count: usize,
4062    ) -> Result<&Self, CastError<&[u8], Self>>
4063    where
4064        Self: KnownLayout<PointerMetadata = usize> + Immutable,
4065    {
4066        let source = Ptr::from_ref(source);
4067        let maybe_slf = source.try_cast_into_no_leftover::<_, BecauseImmutable>(Some(count));
4068        match maybe_slf {
4069            Ok(slf) => Ok(slf.recall_validity().as_ref()),
4070            Err(err) => Err(err.map_src(|s| s.as_ref())),
4071        }
4072    }
4073
4074    /// Interprets the prefix of the given `source` as a DST `&Self` with length
4075    /// equal to `count`.
4076    ///
4077    /// This method attempts to return a reference to the prefix of `source`
4078    /// interpreted as a `Self` with `count` trailing elements, and a reference
4079    /// to the remaining bytes. If there are insufficient bytes, or if `source`
4080    /// is not appropriately aligned, this returns `Err`. If [`Self:
4081    /// Unaligned`][self-unaligned], you can [infallibly discard the alignment
4082    /// error][size-error-from].
4083    ///
4084    /// [self-unaligned]: Unaligned
4085    /// [size-error-from]: error/struct.SizeError.html#method.from-1
4086    ///
4087    /// # Examples
4088    ///
4089    /// ```
4090    /// use zerocopy::FromBytes;
4091    /// # use zerocopy_derive::*;
4092    ///
4093    /// # #[derive(Debug, PartialEq, Eq)]
4094    /// #[derive(FromBytes, Immutable)]
4095    /// #[repr(C)]
4096    /// struct Pixel {
4097    ///     r: u8,
4098    ///     g: u8,
4099    ///     b: u8,
4100    ///     a: u8,
4101    /// }
4102    ///
4103    /// // These are more bytes than are needed to encode two `Pixel`s.
4104    /// let bytes = &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9][..];
4105    ///
4106    /// let (pixels, suffix) = <[Pixel]>::ref_from_prefix_with_elems(bytes, 2).unwrap();
4107    ///
4108    /// assert_eq!(pixels, &[
4109    ///     Pixel { r: 0, g: 1, b: 2, a: 3 },
4110    ///     Pixel { r: 4, g: 5, b: 6, a: 7 },
4111    /// ]);
4112    ///
4113    /// assert_eq!(suffix, &[8, 9]);
4114    /// ```
4115    ///
4116    /// Since an explicit `count` is provided, this method supports types with
4117    /// zero-sized trailing slice elements. Methods such as [`ref_from_prefix`]
4118    /// which do not take an explicit count do not support such types.
4119    ///
4120    /// ```
4121    /// use zerocopy::*;
4122    /// # use zerocopy_derive::*;
4123    ///
4124    /// #[derive(FromBytes, Immutable, KnownLayout)]
4125    /// #[repr(C)]
4126    /// struct ZSTy {
4127    ///     leading_sized: [u8; 2],
4128    ///     trailing_dst: [()],
4129    /// }
4130    ///
4131    /// let src = &[85, 85][..];
4132    /// let (zsty, _) = ZSTy::ref_from_prefix_with_elems(src, 42).unwrap();
4133    /// assert_eq!(zsty.trailing_dst.len(), 42);
4134    /// ```
4135    ///
4136    /// [`ref_from_prefix`]: FromBytes::ref_from_prefix
4137    #[must_use = "has no side effects"]
4138    #[inline]
4139    fn ref_from_prefix_with_elems(
4140        source: &[u8],
4141        count: usize,
4142    ) -> Result<(&Self, &[u8]), CastError<&[u8], Self>>
4143    where
4144        Self: KnownLayout<PointerMetadata = usize> + Immutable,
4145    {
4146        ref_from_prefix_suffix(source, Some(count), CastType::Prefix)
4147    }
4148
4149    /// Interprets the suffix of the given `source` as a DST `&Self` with length
4150    /// equal to `count`.
4151    ///
4152    /// This method attempts to return a reference to the suffix of `source`
4153    /// interpreted as a `Self` with `count` trailing elements, and a reference
4154    /// to the preceding bytes. If there are insufficient bytes, or if that
4155    /// suffix of `source` is not appropriately aligned, this returns `Err`. If
4156    /// [`Self: Unaligned`][self-unaligned], you can [infallibly discard the
4157    /// alignment error][size-error-from].
4158    ///
4159    /// [self-unaligned]: Unaligned
4160    /// [size-error-from]: error/struct.SizeError.html#method.from-1
4161    ///
4162    /// # Examples
4163    ///
4164    /// ```
4165    /// use zerocopy::FromBytes;
4166    /// # use zerocopy_derive::*;
4167    ///
4168    /// # #[derive(Debug, PartialEq, Eq)]
4169    /// #[derive(FromBytes, Immutable)]
4170    /// #[repr(C)]
4171    /// struct Pixel {
4172    ///     r: u8,
4173    ///     g: u8,
4174    ///     b: u8,
4175    ///     a: u8,
4176    /// }
4177    ///
4178    /// // These are more bytes than are needed to encode two `Pixel`s.
4179    /// let bytes = &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9][..];
4180    ///
4181    /// let (prefix, pixels) = <[Pixel]>::ref_from_suffix_with_elems(bytes, 2).unwrap();
4182    ///
4183    /// assert_eq!(prefix, &[0, 1]);
4184    ///
4185    /// assert_eq!(pixels, &[
4186    ///     Pixel { r: 2, g: 3, b: 4, a: 5 },
4187    ///     Pixel { r: 6, g: 7, b: 8, a: 9 },
4188    /// ]);
4189    /// ```
4190    ///
4191    /// Since an explicit `count` is provided, this method supports types with
4192    /// zero-sized trailing slice elements. Methods such as [`ref_from_suffix`]
4193    /// which do not take an explicit count do not support such types.
4194    ///
4195    /// ```
4196    /// use zerocopy::*;
4197    /// # use zerocopy_derive::*;
4198    ///
4199    /// #[derive(FromBytes, Immutable, KnownLayout)]
4200    /// #[repr(C)]
4201    /// struct ZSTy {
4202    ///     leading_sized: [u8; 2],
4203    ///     trailing_dst: [()],
4204    /// }
4205    ///
4206    /// let src = &[85, 85][..];
4207    /// let (_, zsty) = ZSTy::ref_from_suffix_with_elems(src, 42).unwrap();
4208    /// assert_eq!(zsty.trailing_dst.len(), 42);
4209    /// ```
4210    ///
4211    /// [`ref_from_suffix`]: FromBytes::ref_from_suffix
4212    #[must_use = "has no side effects"]
4213    #[inline]
4214    fn ref_from_suffix_with_elems(
4215        source: &[u8],
4216        count: usize,
4217    ) -> Result<(&[u8], &Self), CastError<&[u8], Self>>
4218    where
4219        Self: KnownLayout<PointerMetadata = usize> + Immutable,
4220    {
4221        ref_from_prefix_suffix(source, Some(count), CastType::Suffix).map(swap)
4222    }
4223
4224    /// Interprets the given `source` as a `&mut Self` with a DST length equal
4225    /// to `count`.
4226    ///
4227    /// This method attempts to return a reference to `source` interpreted as a
4228    /// `Self` with `count` trailing elements. If the length of `source` is not
4229    /// equal to the size of `Self` with `count` elements, or if `source` is not
4230    /// appropriately aligned, this returns `Err`. If [`Self:
4231    /// Unaligned`][self-unaligned], you can [infallibly discard the alignment
4232    /// error][size-error-from].
4233    ///
4234    /// [self-unaligned]: Unaligned
4235    /// [size-error-from]: error/struct.SizeError.html#method.from-1
4236    ///
4237    /// # Examples
4238    ///
4239    /// ```
4240    /// use zerocopy::FromBytes;
4241    /// # use zerocopy_derive::*;
4242    ///
4243    /// # #[derive(Debug, PartialEq, Eq)]
4244    /// #[derive(KnownLayout, FromBytes, IntoBytes, Immutable)]
4245    /// #[repr(C)]
4246    /// struct Pixel {
4247    ///     r: u8,
4248    ///     g: u8,
4249    ///     b: u8,
4250    ///     a: u8,
4251    /// }
4252    ///
4253    /// let bytes = &mut [0, 1, 2, 3, 4, 5, 6, 7][..];
4254    ///
4255    /// let pixels = <[Pixel]>::mut_from_bytes_with_elems(bytes, 2).unwrap();
4256    ///
4257    /// assert_eq!(pixels, &[
4258    ///     Pixel { r: 0, g: 1, b: 2, a: 3 },
4259    ///     Pixel { r: 4, g: 5, b: 6, a: 7 },
4260    /// ]);
4261    ///
4262    /// pixels[1] = Pixel { r: 0, g: 0, b: 0, a: 0 };
4263    ///
4264    /// assert_eq!(bytes, [0, 1, 2, 3, 0, 0, 0, 0]);
4265    /// ```
4266    ///
4267    /// Since an explicit `count` is provided, this method supports types with
4268    /// zero-sized trailing slice elements. Methods such as [`mut_from_bytes`]
4269    /// which do not take an explicit count do not support such types.
4270    ///
4271    /// ```
4272    /// use zerocopy::*;
4273    /// # use zerocopy_derive::*;
4274    ///
4275    /// #[derive(FromBytes, IntoBytes, Immutable, KnownLayout)]
4276    /// #[repr(C, packed)]
4277    /// struct ZSTy {
4278    ///     leading_sized: [u8; 2],
4279    ///     trailing_dst: [()],
4280    /// }
4281    ///
4282    /// let src = &mut [85, 85][..];
4283    /// let zsty = ZSTy::mut_from_bytes_with_elems(src, 42).unwrap();
4284    /// assert_eq!(zsty.trailing_dst.len(), 42);
4285    /// ```
4286    ///
4287    /// [`mut_from_bytes`]: FromBytes::mut_from_bytes
4288    #[must_use = "has no side effects"]
4289    #[inline]
4290    fn mut_from_bytes_with_elems(
4291        source: &mut [u8],
4292        count: usize,
4293    ) -> Result<&mut Self, CastError<&mut [u8], Self>>
4294    where
4295        Self: IntoBytes + KnownLayout<PointerMetadata = usize> + Immutable,
4296    {
4297        let source = Ptr::from_mut(source);
4298        let maybe_slf = source.try_cast_into_no_leftover::<_, BecauseImmutable>(Some(count));
4299        match maybe_slf {
4300            Ok(slf) => Ok(slf
4301                .recall_validity::<_, (_, (_, (BecauseExclusive, BecauseExclusive)))>()
4302                .as_mut()),
4303            Err(err) => Err(err.map_src(|s| s.as_mut())),
4304        }
4305    }
4306
4307    /// Interprets the prefix of the given `source` as a `&mut Self` with DST
4308    /// length equal to `count`.
4309    ///
4310    /// This method attempts to return a reference to the prefix of `source`
4311    /// interpreted as a `Self` with `count` trailing elements, and a reference
4312    /// to the preceding bytes. If there are insufficient bytes, or if `source`
4313    /// is not appropriately aligned, this returns `Err`. If [`Self:
4314    /// Unaligned`][self-unaligned], you can [infallibly discard the alignment
4315    /// error][size-error-from].
4316    ///
4317    /// [self-unaligned]: Unaligned
4318    /// [size-error-from]: error/struct.SizeError.html#method.from-1
4319    ///
4320    /// # Examples
4321    ///
4322    /// ```
4323    /// use zerocopy::FromBytes;
4324    /// # use zerocopy_derive::*;
4325    ///
4326    /// # #[derive(Debug, PartialEq, Eq)]
4327    /// #[derive(KnownLayout, FromBytes, IntoBytes, Immutable)]
4328    /// #[repr(C)]
4329    /// struct Pixel {
4330    ///     r: u8,
4331    ///     g: u8,
4332    ///     b: u8,
4333    ///     a: u8,
4334    /// }
4335    ///
4336    /// // These are more bytes than are needed to encode two `Pixel`s.
4337    /// let bytes = &mut [0, 1, 2, 3, 4, 5, 6, 7, 8, 9][..];
4338    ///
4339    /// let (pixels, suffix) = <[Pixel]>::mut_from_prefix_with_elems(bytes, 2).unwrap();
4340    ///
4341    /// assert_eq!(pixels, &[
4342    ///     Pixel { r: 0, g: 1, b: 2, a: 3 },
4343    ///     Pixel { r: 4, g: 5, b: 6, a: 7 },
4344    /// ]);
4345    ///
4346    /// assert_eq!(suffix, &[8, 9]);
4347    ///
4348    /// pixels[1] = Pixel { r: 0, g: 0, b: 0, a: 0 };
4349    /// suffix.fill(1);
4350    ///
4351    /// assert_eq!(bytes, [0, 1, 2, 3, 0, 0, 0, 0, 1, 1]);
4352    /// ```
4353    ///
4354    /// Since an explicit `count` is provided, this method supports types with
4355    /// zero-sized trailing slice elements. Methods such as [`mut_from_prefix`]
4356    /// which do not take an explicit count do not support such types.
4357    ///
4358    /// ```
4359    /// use zerocopy::*;
4360    /// # use zerocopy_derive::*;
4361    ///
4362    /// #[derive(FromBytes, IntoBytes, Immutable, KnownLayout)]
4363    /// #[repr(C, packed)]
4364    /// struct ZSTy {
4365    ///     leading_sized: [u8; 2],
4366    ///     trailing_dst: [()],
4367    /// }
4368    ///
4369    /// let src = &mut [85, 85][..];
4370    /// let (zsty, _) = ZSTy::mut_from_prefix_with_elems(src, 42).unwrap();
4371    /// assert_eq!(zsty.trailing_dst.len(), 42);
4372    /// ```
4373    ///
4374    /// [`mut_from_prefix`]: FromBytes::mut_from_prefix
4375    #[must_use = "has no side effects"]
4376    #[inline]
4377    fn mut_from_prefix_with_elems(
4378        source: &mut [u8],
4379        count: usize,
4380    ) -> Result<(&mut Self, &mut [u8]), CastError<&mut [u8], Self>>
4381    where
4382        Self: IntoBytes + KnownLayout<PointerMetadata = usize>,
4383    {
4384        mut_from_prefix_suffix(source, Some(count), CastType::Prefix)
4385    }
4386
4387    /// Interprets the suffix of the given `source` as a `&mut Self` with DST
4388    /// length equal to `count`.
4389    ///
4390    /// This method attempts to return a reference to the suffix of `source`
4391    /// interpreted as a `Self` with `count` trailing elements, and a reference
4392    /// to the remaining bytes. If there are insufficient bytes, or if that
4393    /// suffix of `source` is not appropriately aligned, this returns `Err`. If
4394    /// [`Self: Unaligned`][self-unaligned], you can [infallibly discard the
4395    /// alignment error][size-error-from].
4396    ///
4397    /// [self-unaligned]: Unaligned
4398    /// [size-error-from]: error/struct.SizeError.html#method.from-1
4399    ///
4400    /// # Examples
4401    ///
4402    /// ```
4403    /// use zerocopy::FromBytes;
4404    /// # use zerocopy_derive::*;
4405    ///
4406    /// # #[derive(Debug, PartialEq, Eq)]
4407    /// #[derive(FromBytes, IntoBytes, Immutable)]
4408    /// #[repr(C)]
4409    /// struct Pixel {
4410    ///     r: u8,
4411    ///     g: u8,
4412    ///     b: u8,
4413    ///     a: u8,
4414    /// }
4415    ///
4416    /// // These are more bytes than are needed to encode two `Pixel`s.
4417    /// let bytes = &mut [0, 1, 2, 3, 4, 5, 6, 7, 8, 9][..];
4418    ///
4419    /// let (prefix, pixels) = <[Pixel]>::mut_from_suffix_with_elems(bytes, 2).unwrap();
4420    ///
4421    /// assert_eq!(prefix, &[0, 1]);
4422    ///
4423    /// assert_eq!(pixels, &[
4424    ///     Pixel { r: 2, g: 3, b: 4, a: 5 },
4425    ///     Pixel { r: 6, g: 7, b: 8, a: 9 },
4426    /// ]);
4427    ///
4428    /// prefix.fill(9);
4429    /// pixels[1] = Pixel { r: 0, g: 0, b: 0, a: 0 };
4430    ///
4431    /// assert_eq!(bytes, [9, 9, 2, 3, 4, 5, 0, 0, 0, 0]);
4432    /// ```
4433    ///
4434    /// Since an explicit `count` is provided, this method supports types with
4435    /// zero-sized trailing slice elements. Methods such as [`mut_from_suffix`]
4436    /// which do not take an explicit count do not support such types.
4437    ///
4438    /// ```
4439    /// use zerocopy::*;
4440    /// # use zerocopy_derive::*;
4441    ///
4442    /// #[derive(FromBytes, IntoBytes, Immutable, KnownLayout)]
4443    /// #[repr(C, packed)]
4444    /// struct ZSTy {
4445    ///     leading_sized: [u8; 2],
4446    ///     trailing_dst: [()],
4447    /// }
4448    ///
4449    /// let src = &mut [85, 85][..];
4450    /// let (_, zsty) = ZSTy::mut_from_suffix_with_elems(src, 42).unwrap();
4451    /// assert_eq!(zsty.trailing_dst.len(), 42);
4452    /// ```
4453    ///
4454    /// [`mut_from_suffix`]: FromBytes::mut_from_suffix
4455    #[must_use = "has no side effects"]
4456    #[inline]
4457    fn mut_from_suffix_with_elems(
4458        source: &mut [u8],
4459        count: usize,
4460    ) -> Result<(&mut [u8], &mut Self), CastError<&mut [u8], Self>>
4461    where
4462        Self: IntoBytes + KnownLayout<PointerMetadata = usize>,
4463    {
4464        mut_from_prefix_suffix(source, Some(count), CastType::Suffix).map(swap)
4465    }
4466
4467    /// Reads a copy of `Self` from the given `source`.
4468    ///
4469    /// If `source.len() != size_of::<Self>()`, `read_from_bytes` returns `Err`.
4470    ///
4471    /// # Examples
4472    ///
4473    /// ```
4474    /// use zerocopy::FromBytes;
4475    /// # use zerocopy_derive::*;
4476    ///
4477    /// #[derive(FromBytes)]
4478    /// #[repr(C)]
4479    /// struct PacketHeader {
4480    ///     src_port: [u8; 2],
4481    ///     dst_port: [u8; 2],
4482    ///     length: [u8; 2],
4483    ///     checksum: [u8; 2],
4484    /// }
4485    ///
4486    /// // These bytes encode a `PacketHeader`.
4487    /// let bytes = &[0, 1, 2, 3, 4, 5, 6, 7][..];
4488    ///
4489    /// let header = PacketHeader::read_from_bytes(bytes).unwrap();
4490    ///
4491    /// assert_eq!(header.src_port, [0, 1]);
4492    /// assert_eq!(header.dst_port, [2, 3]);
4493    /// assert_eq!(header.length, [4, 5]);
4494    /// assert_eq!(header.checksum, [6, 7]);
4495    /// ```
4496    #[must_use = "has no side effects"]
4497    #[inline]
4498    fn read_from_bytes(source: &[u8]) -> Result<Self, SizeError<&[u8], Self>>
4499    where
4500        Self: Sized,
4501    {
4502        match Ref::<_, Unalign<Self>>::sized_from(source) {
4503            Ok(r) => Ok(Ref::read(&r).into_inner()),
4504            Err(CastError::Size(e)) => Err(e.with_dst()),
4505            Err(CastError::Alignment(_)) => {
4506                // SAFETY: `Unalign<Self>` is trivially aligned, so
4507                // `Ref::sized_from` cannot fail due to unmet alignment
4508                // requirements.
4509                unsafe { core::hint::unreachable_unchecked() }
4510            }
4511            Err(CastError::Validity(i)) => match i {},
4512        }
4513    }
4514
4515    /// Reads a copy of `Self` from the prefix of the given `source`.
4516    ///
4517    /// This attempts to read a `Self` from the first `size_of::<Self>()` bytes
4518    /// of `source`, returning that `Self` and any remaining bytes. If
4519    /// `source.len() < size_of::<Self>()`, it returns `Err`.
4520    ///
4521    /// # Examples
4522    ///
4523    /// ```
4524    /// use zerocopy::FromBytes;
4525    /// # use zerocopy_derive::*;
4526    ///
4527    /// #[derive(FromBytes)]
4528    /// #[repr(C)]
4529    /// struct PacketHeader {
4530    ///     src_port: [u8; 2],
4531    ///     dst_port: [u8; 2],
4532    ///     length: [u8; 2],
4533    ///     checksum: [u8; 2],
4534    /// }
4535    ///
4536    /// // These are more bytes than are needed to encode a `PacketHeader`.
4537    /// let bytes = &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9][..];
4538    ///
4539    /// let (header, body) = PacketHeader::read_from_prefix(bytes).unwrap();
4540    ///
4541    /// assert_eq!(header.src_port, [0, 1]);
4542    /// assert_eq!(header.dst_port, [2, 3]);
4543    /// assert_eq!(header.length, [4, 5]);
4544    /// assert_eq!(header.checksum, [6, 7]);
4545    /// assert_eq!(body, [8, 9]);
4546    /// ```
4547    #[must_use = "has no side effects"]
4548    #[inline]
4549    fn read_from_prefix(source: &[u8]) -> Result<(Self, &[u8]), SizeError<&[u8], Self>>
4550    where
4551        Self: Sized,
4552    {
4553        match Ref::<_, Unalign<Self>>::sized_from_prefix(source) {
4554            Ok((r, suffix)) => Ok((Ref::read(&r).into_inner(), suffix)),
4555            Err(CastError::Size(e)) => Err(e.with_dst()),
4556            Err(CastError::Alignment(_)) => {
4557                // SAFETY: `Unalign<Self>` is trivially aligned, so
4558                // `Ref::sized_from_prefix` cannot fail due to unmet alignment
4559                // requirements.
4560                unsafe { core::hint::unreachable_unchecked() }
4561            }
4562            Err(CastError::Validity(i)) => match i {},
4563        }
4564    }
4565
4566    /// Reads a copy of `Self` from the suffix of the given `source`.
4567    ///
4568    /// This attempts to read a `Self` from the last `size_of::<Self>()` bytes
4569    /// of `source`, returning that `Self` and any preceding bytes. If
4570    /// `source.len() < size_of::<Self>()`, it returns `Err`.
4571    ///
4572    /// # Examples
4573    ///
4574    /// ```
4575    /// use zerocopy::FromBytes;
4576    /// # use zerocopy_derive::*;
4577    ///
4578    /// #[derive(FromBytes)]
4579    /// #[repr(C)]
4580    /// struct PacketTrailer {
4581    ///     frame_check_sequence: [u8; 4],
4582    /// }
4583    ///
4584    /// // These are more bytes than are needed to encode a `PacketTrailer`.
4585    /// let bytes = &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9][..];
4586    ///
4587    /// let (prefix, trailer) = PacketTrailer::read_from_suffix(bytes).unwrap();
4588    ///
4589    /// assert_eq!(prefix, [0, 1, 2, 3, 4, 5]);
4590    /// assert_eq!(trailer.frame_check_sequence, [6, 7, 8, 9]);
4591    /// ```
4592    #[must_use = "has no side effects"]
4593    #[inline]
4594    fn read_from_suffix(source: &[u8]) -> Result<(&[u8], Self), SizeError<&[u8], Self>>
4595    where
4596        Self: Sized,
4597    {
4598        match Ref::<_, Unalign<Self>>::sized_from_suffix(source) {
4599            Ok((prefix, r)) => Ok((prefix, Ref::read(&r).into_inner())),
4600            Err(CastError::Size(e)) => Err(e.with_dst()),
4601            Err(CastError::Alignment(_)) => {
4602                // SAFETY: `Unalign<Self>` is trivially aligned, so
4603                // `Ref::sized_from_suffix` cannot fail due to unmet alignment
4604                // requirements.
4605                unsafe { core::hint::unreachable_unchecked() }
4606            }
4607            Err(CastError::Validity(i)) => match i {},
4608        }
4609    }
4610
4611    /// Reads a copy of `Self` from an `io::Read`.
4612    ///
4613    /// This is useful for interfacing with operating system byte sources (files,
4614    /// sockets, etc.).
4615    ///
4616    /// # Examples
4617    ///
4618    /// ```no_run
4619    /// use zerocopy::{byteorder::big_endian::*, FromBytes};
4620    /// use std::fs::File;
4621    /// # use zerocopy_derive::*;
4622    ///
4623    /// #[derive(FromBytes)]
4624    /// #[repr(C)]
4625    /// struct BitmapFileHeader {
4626    ///     signature: [u8; 2],
4627    ///     size: U32,
4628    ///     reserved: U64,
4629    ///     offset: U64,
4630    /// }
4631    ///
4632    /// let mut file = File::open("image.bin").unwrap();
4633    /// let header = BitmapFileHeader::read_from_io(&mut file).unwrap();
4634    /// ```
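    ///
    /// Because any `io::Read` may be used, `read_from_io` can also parse from
    /// an in-memory source such as a byte slice; e.g. (a brief sketch):
    ///
    /// ```
    /// use zerocopy::{byteorder::big_endian::U32, FromBytes};
    /// # use zerocopy_derive::*;
    ///
    /// #[derive(FromBytes)]
    /// #[repr(C)]
    /// struct Record {
    ///     id: U32,
    /// }
    ///
    /// // `&[u8]` implements `std::io::Read`, so a byte slice can serve as the
    /// // source.
    /// let mut src = &[0u8, 0, 0, 7][..];
    /// let record = Record::read_from_io(&mut src).unwrap();
    /// assert_eq!(record.id.get(), 7);
    /// ```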
4635    #[cfg(feature = "std")]
4636    #[inline(always)]
4637    fn read_from_io<R>(mut src: R) -> io::Result<Self>
4638    where
4639        Self: Sized,
4640        R: io::Read,
4641    {
4642        // NOTE(#2319, #2320): We do `buf.zero()` separately rather than
4643        // constructing `let buf = CoreMaybeUninit::zeroed()` because, if `Self`
4644        // contains padding bytes, then a typed copy of `CoreMaybeUninit<Self>`
4645        // will not necessarily preserve zeros written to those padding byte
4646        // locations, and so `buf` could contain uninitialized bytes.
4647        let mut buf = CoreMaybeUninit::<Self>::uninit();
4648        buf.zero();
4649
4650        let ptr = Ptr::from_mut(&mut buf);
4651        // SAFETY: After `buf.zero()`, `buf` consists entirely of initialized,
4652        // zeroed bytes. Since `MaybeUninit` has no validity requirements, `ptr`
4653        // cannot be used to write values which will violate `buf`'s bit
4654        // validity. Since `ptr` has `Exclusive` aliasing, nothing other than
4655        // `ptr` may be used to mutate `ptr`'s referent, and so its bit validity
4656        // cannot be violated even though `buf` may have more permissive bit
4657        // validity than `ptr`.
4658        let ptr = unsafe { ptr.assume_validity::<invariant::Initialized>() };
4659        let ptr = ptr.as_bytes::<BecauseExclusive>();
4660        src.read_exact(ptr.as_mut())?;
4661        // SAFETY: `buf` entirely consists of initialized bytes, and `Self` is
4662        // `FromBytes`.
4663        Ok(unsafe { buf.assume_init() })
4664    }
4665
4666    #[deprecated(since = "0.8.0", note = "renamed to `FromBytes::ref_from_bytes`")]
4667    #[doc(hidden)]
4668    #[must_use = "has no side effects"]
4669    #[inline(always)]
4670    fn ref_from(source: &[u8]) -> Option<&Self>
4671    where
4672        Self: KnownLayout + Immutable,
4673    {
4674        Self::ref_from_bytes(source).ok()
4675    }
4676
4677    #[deprecated(since = "0.8.0", note = "renamed to `FromBytes::mut_from_bytes`")]
4678    #[doc(hidden)]
4679    #[must_use = "has no side effects"]
4680    #[inline(always)]
4681    fn mut_from(source: &mut [u8]) -> Option<&mut Self>
4682    where
4683        Self: KnownLayout + IntoBytes,
4684    {
4685        Self::mut_from_bytes(source).ok()
4686    }
4687
4688    #[deprecated(since = "0.8.0", note = "renamed to `FromBytes::ref_from_prefix_with_elems`")]
4689    #[doc(hidden)]
4690    #[must_use = "has no side effects"]
4691    #[inline(always)]
4692    fn slice_from_prefix(source: &[u8], count: usize) -> Option<(&[Self], &[u8])>
4693    where
4694        Self: Sized + Immutable,
4695    {
4696        <[Self]>::ref_from_prefix_with_elems(source, count).ok()
4697    }
4698
4699    #[deprecated(since = "0.8.0", note = "renamed to `FromBytes::ref_from_suffix_with_elems`")]
4700    #[doc(hidden)]
4701    #[must_use = "has no side effects"]
4702    #[inline(always)]
4703    fn slice_from_suffix(source: &[u8], count: usize) -> Option<(&[u8], &[Self])>
4704    where
4705        Self: Sized + Immutable,
4706    {
4707        <[Self]>::ref_from_suffix_with_elems(source, count).ok()
4708    }
4709
4710    #[deprecated(since = "0.8.0", note = "renamed to `FromBytes::mut_from_prefix_with_elems`")]
4711    #[doc(hidden)]
4712    #[must_use = "has no side effects"]
4713    #[inline(always)]
4714    fn mut_slice_from_prefix(source: &mut [u8], count: usize) -> Option<(&mut [Self], &mut [u8])>
4715    where
4716        Self: Sized + IntoBytes,
4717    {
4718        <[Self]>::mut_from_prefix_with_elems(source, count).ok()
4719    }
4720
4721    #[deprecated(since = "0.8.0", note = "renamed to `FromBytes::mut_from_suffix_with_elems`")]
4722    #[doc(hidden)]
4723    #[must_use = "has no side effects"]
4724    #[inline(always)]
4725    fn mut_slice_from_suffix(source: &mut [u8], count: usize) -> Option<(&mut [u8], &mut [Self])>
4726    where
4727        Self: Sized + IntoBytes,
4728    {
4729        <[Self]>::mut_from_suffix_with_elems(source, count).ok()
4730    }
4731
4732    #[deprecated(since = "0.8.0", note = "renamed to `FromBytes::read_from_bytes`")]
4733    #[doc(hidden)]
4734    #[must_use = "has no side effects"]
4735    #[inline(always)]
4736    fn read_from(source: &[u8]) -> Option<Self>
4737    where
4738        Self: Sized,
4739    {
4740        Self::read_from_bytes(source).ok()
4741    }
4742}
4743
4744/// Interprets the prefix or suffix of the given bytes as a `&T`.
4745///
4746/// This function computes the size of `T` implied by `meta` (or, if `meta` is
4747/// `None`, the largest size that fits in `source`), then attempts to return
4748/// both a reference to the prefix or suffix of `source` interpreted as a `T`,
4749/// and a reference to the excess bytes. If there are insufficient bytes, or if
4750/// that affix of `source` is not appropriately aligned, this returns `Err`.
4751#[inline(always)]
4752fn ref_from_prefix_suffix<T: FromBytes + KnownLayout + Immutable + ?Sized>(
4753    source: &[u8],
4754    meta: Option<T::PointerMetadata>,
4755    cast_type: CastType,
4756) -> Result<(&T, &[u8]), CastError<&[u8], T>> {
4757    let (slf, prefix_suffix) = Ptr::from_ref(source)
4758        .try_cast_into::<_, BecauseImmutable>(cast_type, meta)
4759        .map_err(|err| err.map_src(|s| s.as_ref()))?;
4760    Ok((slf.recall_validity().as_ref(), prefix_suffix.as_ref()))
4761}
4762
4763/// Interprets the prefix or suffix of the given bytes as a `&mut T` without
4764/// copying.
4765///
4766/// This function computes the size of `T` implied by `meta` (or, if `meta` is
4767/// `None`, the largest size that fits in `source`), then attempts to return
4768/// both a mutable reference to the prefix or suffix of `source` interpreted
4769/// as a `T`, and a mutable reference to the excess bytes. If there are
4770/// insufficient bytes, or if that affix is not appropriately aligned, this returns `Err`.
4771#[inline(always)]
4772fn mut_from_prefix_suffix<T: FromBytes + IntoBytes + KnownLayout + ?Sized>(
4773    source: &mut [u8],
4774    meta: Option<T::PointerMetadata>,
4775    cast_type: CastType,
4776) -> Result<(&mut T, &mut [u8]), CastError<&mut [u8], T>> {
4777    let (slf, prefix_suffix) = Ptr::from_mut(source)
4778        .try_cast_into::<_, BecauseExclusive>(cast_type, meta)
4779        .map_err(|err| err.map_src(|s| s.as_mut()))?;
4780    Ok((slf.recall_validity().as_mut(), prefix_suffix.as_mut()))
4781}
4782
4783/// Analyzes whether a type is [`IntoBytes`].
4784///
4785/// This derive analyzes, at compile time, whether the annotated type satisfies
4786/// the [safety conditions] of `IntoBytes` and implements `IntoBytes` if it is
4787/// sound to do so. This derive can be applied to structs and enums (see below
4788/// for union support); e.g.:
4789///
4790/// ```
4791/// # use zerocopy_derive::{IntoBytes};
4792/// #[derive(IntoBytes)]
4793/// #[repr(C)]
4794/// struct MyStruct {
4795/// # /*
4796///     ...
4797/// # */
4798/// }
4799///
4800/// #[derive(IntoBytes)]
4801/// #[repr(u8)]
4802/// enum MyEnum {
4803/// #   Variant,
4804/// # /*
4805///     ...
4806/// # */
4807/// }
4808/// ```
4809///
4810/// [safety conditions]: trait@IntoBytes#safety
4811///
4812/// # Error Messages
4813///
4814/// On Rust toolchains prior to 1.78.0, due to the way that the custom derive
4815/// for `IntoBytes` is implemented, you may get an error like this:
4816///
4817/// ```text
4818/// error[E0277]: the trait bound `(): PaddingFree<Foo, true>` is not satisfied
4819///   --> lib.rs:23:10
4820///    |
4821///  1 | #[derive(IntoBytes)]
4822///    |          ^^^^^^^^^ the trait `PaddingFree<Foo, true>` is not implemented for `()`
4823///    |
4824///    = help: the following implementations were found:
4825///                   <() as PaddingFree<T, false>>
4826/// ```
4827///
4828/// This error indicates that the type being annotated has padding bytes, which
4829/// is illegal for `IntoBytes` types. Consider reducing the alignment of some
4830/// fields by using types in the [`byteorder`] module, wrapping field types in
4831/// [`Unalign`], adding explicit struct fields where those padding bytes would
4832/// be, or using `#[repr(packed)]`. See the Rust Reference's page on [type
4833/// layout] for more information about type layout and padding.
4834///
4835/// [type layout]: https://doc.rust-lang.org/reference/type-layout.html
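///
/// For example, wrapping a `u16` field in [`Unalign`] (one of the fixes
/// suggested above) removes the padding which that field would otherwise
/// require; a brief sketch:
///
/// ```
/// # use zerocopy_derive::IntoBytes;
/// use zerocopy::Unalign;
///
/// // Without `Unalign`, `repr(C)` would insert one byte of padding after `a`
/// // in order to align `b`, and the derive would reject the type.
/// // `Unalign<u16>` has alignment 1, so `NoPadding` has no padding bytes.
/// #[derive(IntoBytes)]
/// #[repr(C)]
/// struct NoPadding {
///     a: u8,
///     b: Unalign<u16>,
/// }
/// ```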
4836///
4837/// # Unions
4838///
4839/// Currently, union bit validity is [up in the air][union-validity], and so
4840/// zerocopy does not support `#[derive(IntoBytes)]` on unions by default.
4841/// However, implementing `IntoBytes` on a union type is likely sound on all
4842/// existing Rust toolchains - it's just that it may become unsound in the
4843/// future. You can opt-in to `#[derive(IntoBytes)]` support on unions by
4844/// passing the unstable `zerocopy_derive_union_into_bytes` cfg:
4845///
4846/// ```shell
4847/// $ RUSTFLAGS='--cfg zerocopy_derive_union_into_bytes' cargo build
4848/// ```
4849///
4850/// However, it is your responsibility to ensure that this derive is sound on
4851/// the specific versions of the Rust toolchain you are using! We make no
4852/// stability or soundness guarantees regarding this cfg, and may remove it at
4853/// any point.
4854///
4855/// We are actively working with Rust to stabilize the necessary language
4856/// guarantees to support this in a forwards-compatible way, which will enable
4857/// us to remove the cfg gate. As part of this effort, we need to know how much
4858/// demand there is for this feature. If you would like to use `IntoBytes` on
4859/// unions, [please let us know][discussion].
4860///
4861/// [union-validity]: https://github.com/rust-lang/unsafe-code-guidelines/issues/438
4862/// [discussion]: https://github.com/google/zerocopy/discussions/1802
4863///
4864/// # Analysis
4865///
4866/// *This section describes, roughly, the analysis performed by this derive to
4867/// determine whether it is sound to implement `IntoBytes` for a given type.
4868/// Unless you are modifying the implementation of this derive, or attempting to
4869/// manually implement `IntoBytes` for a type yourself, you don't need to read
4870/// this section.*
4871///
4872/// If a type has the following properties, then this derive can implement
4873/// `IntoBytes` for that type:
4874///
4875/// - If the type is a struct, its fields must be [`IntoBytes`]. Additionally:
4876///     - if the type is `repr(transparent)` or `repr(packed)`, it is
4877///       [`IntoBytes`] if its fields are [`IntoBytes`]; else,
4878///     - if the type is `repr(C)` with at most one field, it is [`IntoBytes`]
4879///       if its field is [`IntoBytes`]; else,
4880///     - if the type has no generic parameters, it is [`IntoBytes`] if the type
4881///       is sized and has no padding bytes; else,
4882///     - if the type is `repr(C)`, its fields must be [`Unaligned`].
4883/// - If the type is an enum:
4884///   - It must have a defined representation (`repr`s `C`, `u8`, `u16`, `u32`,
4885///     `u64`, `usize`, `i8`, `i16`, `i32`, `i64`, or `isize`).
4886///   - It must have no padding bytes.
4887///   - Its fields must be [`IntoBytes`].
4888///
4889/// This analysis is subject to change. Unsafe code may *only* rely on the
4890/// documented [safety conditions] of `IntoBytes`, and must *not* rely on the
4891/// implementation details of this derive.
4892///
4893/// [Rust Reference]: https://doc.rust-lang.org/reference/type-layout.html
4894#[cfg(any(feature = "derive", test))]
4895#[cfg_attr(doc_cfg, doc(cfg(feature = "derive")))]
4896pub use zerocopy_derive::IntoBytes;
4897
4898/// Types that can be converted to an immutable slice of initialized bytes.
4899///
4900/// Any `IntoBytes` type can be converted to a slice of initialized bytes of the
4901/// same size. This is useful for efficiently serializing structured data as raw
4902/// bytes.
4903///
4904/// # Implementation
4905///
4906/// **Do not implement this trait yourself!** Instead, use
4907/// [`#[derive(IntoBytes)]`][derive]; e.g.:
4908///
4909/// ```
4910/// # use zerocopy_derive::IntoBytes;
4911/// #[derive(IntoBytes)]
4912/// #[repr(C)]
4913/// struct MyStruct {
4914/// # /*
4915///     ...
4916/// # */
4917/// }
4918///
4919/// #[derive(IntoBytes)]
4920/// #[repr(u8)]
4921/// enum MyEnum {
4922/// #   Variant0,
4923/// # /*
4924///     ...
4925/// # */
4926/// }
4927/// ```
4928///
4929/// This derive performs a sophisticated, compile-time safety analysis to
4930/// determine whether a type is `IntoBytes`. See the [derive
4931/// documentation][derive] for guidance on how to interpret error messages
4932/// produced by the derive's analysis.
4933///
4934/// # Safety
4935///
4936/// *This section describes what is required in order for `T: IntoBytes`, and
4937/// what unsafe code may assume of such types. If you don't plan on implementing
4938/// `IntoBytes` manually, and you don't plan on writing unsafe code that
4939/// operates on `IntoBytes` types, then you don't need to read this section.*
4940///
4941/// If `T: IntoBytes`, then unsafe code may assume that it is sound to treat any
4942/// `t: T` as an immutable `[u8]` of length `size_of_val(t)`. If a type is
4943/// marked as `IntoBytes` which violates this contract, it may cause undefined
4944/// behavior.
4945///
4946/// `#[derive(IntoBytes)]` only permits [types which satisfy these
4947/// requirements][derive-analysis].
4948///
4949#[cfg_attr(
4950    feature = "derive",
4951    doc = "[derive]: zerocopy_derive::IntoBytes",
4952    doc = "[derive-analysis]: zerocopy_derive::IntoBytes#analysis"
4953)]
4954#[cfg_attr(
4955    not(feature = "derive"),
4956    doc = concat!("[derive]: https://docs.rs/zerocopy/", env!("CARGO_PKG_VERSION"), "/zerocopy/derive.IntoBytes.html"),
4957    doc = concat!("[derive-analysis]: https://docs.rs/zerocopy/", env!("CARGO_PKG_VERSION"), "/zerocopy/derive.IntoBytes.html#analysis"),
4958)]
4959#[cfg_attr(
4960    zerocopy_diagnostic_on_unimplemented_1_78_0,
4961    diagnostic::on_unimplemented(note = "Consider adding `#[derive(IntoBytes)]` to `{Self}`")
4962)]
4963pub unsafe trait IntoBytes {
4964    // The `Self: Sized` bound makes it so that this function doesn't prevent
4965    // `IntoBytes` from being object safe. Note that other `IntoBytes` methods
4966    // prevent object safety, but those provide a benefit in exchange for object
4967    // safety. If at some point we remove those methods, change their type
4968    // signatures, or move them out of this trait so that `IntoBytes` is object
4969    // safe again, it's important that this function not prevent object safety.
4970    #[doc(hidden)]
4971    fn only_derive_is_allowed_to_implement_this_trait()
4972    where
4973        Self: Sized;
4974
4975    /// Gets the bytes of this value.
4976    ///
4977    /// # Examples
4978    ///
4979    /// ```
4980    /// use zerocopy::IntoBytes;
4981    /// # use zerocopy_derive::*;
4982    ///
4983    /// #[derive(IntoBytes, Immutable)]
4984    /// #[repr(C)]
4985    /// struct PacketHeader {
4986    ///     src_port: [u8; 2],
4987    ///     dst_port: [u8; 2],
4988    ///     length: [u8; 2],
4989    ///     checksum: [u8; 2],
4990    /// }
4991    ///
4992    /// let header = PacketHeader {
4993    ///     src_port: [0, 1],
4994    ///     dst_port: [2, 3],
4995    ///     length: [4, 5],
4996    ///     checksum: [6, 7],
4997    /// };
4998    ///
4999    /// let bytes = header.as_bytes();
5000    ///
5001    /// assert_eq!(bytes, [0, 1, 2, 3, 4, 5, 6, 7]);
5002    /// ```
5003    #[must_use = "has no side effects"]
5004    #[inline(always)]
5005    fn as_bytes(&self) -> &[u8]
5006    where
5007        Self: Immutable,
5008    {
5009        // Note that this method does not have a `Self: Sized` bound;
5010        // `size_of_val` works for unsized values too.
5011        let len = mem::size_of_val(self);
5012        let slf: *const Self = self;
5013
5014        // SAFETY:
5015        // - `slf.cast::<u8>()` is valid for reads for `len * size_of::<u8>()`
5016        //   many bytes because...
5017        //   - `slf` is the same pointer as `self`, and `self` is a reference
5018        //     which points to an object whose size is `len`. Thus...
5019        //     - The entire region of `len` bytes starting at `slf` is contained
5020        //       within a single allocation.
5021        //     - `slf` is non-null.
5022        //   - `slf` is trivially aligned to `align_of::<u8>() == 1`.
5023        // - `Self: IntoBytes` ensures that all of the bytes of `slf` are
5024        //   initialized.
5025        // - Since `slf` is derived from `self`, and `self` is an immutable
5026        //   reference, the only other references to this memory region that
5027        //   could exist are other immutable references, and those don't allow
5028        //   mutation. `Self: Immutable` prohibits types which contain
5029        //   `UnsafeCell`s, which are the only types for which this rule
5030        //   wouldn't be sufficient.
5031        // - The total size of the resulting slice is no larger than
5032        //   `isize::MAX` because no allocation produced by safe code can be
5033        //   larger than `isize::MAX`.
5034        //
5035        // TODO(#429): Add references to docs and quotes.
5036        unsafe { slice::from_raw_parts(slf.cast::<u8>(), len) }
5037    }
5038
5039    /// Gets the bytes of this value mutably.
5040    ///
5041    /// # Examples
5042    ///
5043    /// ```
5044    /// use zerocopy::IntoBytes;
5045    /// # use zerocopy_derive::*;
5046    ///
5047    /// # #[derive(Eq, PartialEq, Debug)]
5048    /// #[derive(FromBytes, IntoBytes, Immutable)]
5049    /// #[repr(C)]
5050    /// struct PacketHeader {
5051    ///     src_port: [u8; 2],
5052    ///     dst_port: [u8; 2],
5053    ///     length: [u8; 2],
5054    ///     checksum: [u8; 2],
5055    /// }
5056    ///
5057    /// let mut header = PacketHeader {
5058    ///     src_port: [0, 1],
5059    ///     dst_port: [2, 3],
5060    ///     length: [4, 5],
5061    ///     checksum: [6, 7],
5062    /// };
5063    ///
5064    /// let bytes = header.as_mut_bytes();
5065    ///
5066    /// assert_eq!(bytes, [0, 1, 2, 3, 4, 5, 6, 7]);
5067    ///
5068    /// bytes.reverse();
5069    ///
5070    /// assert_eq!(header, PacketHeader {
5071    ///     src_port: [7, 6],
5072    ///     dst_port: [5, 4],
5073    ///     length: [3, 2],
5074    ///     checksum: [1, 0],
5075    /// });
5076    /// ```
5077    #[must_use = "has no side effects"]
5078    #[inline(always)]
5079    fn as_mut_bytes(&mut self) -> &mut [u8]
5080    where
5081        Self: FromBytes,
5082    {
5083        // Note that this method does not have a `Self: Sized` bound;
5084        // `size_of_val` works for unsized values too.
5085        let len = mem::size_of_val(self);
5086        let slf: *mut Self = self;
5087
5088        // SAFETY:
5089        // - `slf.cast::<u8>()` is valid for reads and writes for `len *
5090        //   size_of::<u8>()` many bytes because...
5091        //   - `slf` is the same pointer as `self`, and `self` is a reference
5092        //     which points to an object whose size is `len`. Thus...
5093        //     - The entire region of `len` bytes starting at `slf` is contained
5094        //       within a single allocation.
5095        //     - `slf` is non-null.
5096        //   - `slf` is trivially aligned to `align_of::<u8>() == 1`.
5097        // - `Self: IntoBytes` ensures that all of the bytes of `slf` are
5098        //   initialized.
5099        // - `Self: FromBytes` ensures that no write to this memory region
5100        //   could result in it containing an invalid `Self`.
5101        // - Since `slf` is derived from `self`, and `self` is a mutable
5102        //   reference, no other references to this memory region can exist.
5103        // - The total size of the resulting slice is no larger than
5104        //   `isize::MAX` because no allocation produced by safe code can be
5105        //   larger than `isize::MAX`.
5106        //
5107        // TODO(#429): Add references to docs and quotes.
5108        unsafe { slice::from_raw_parts_mut(slf.cast::<u8>(), len) }
5109    }
5110
5111    /// Writes a copy of `self` to `dst`.
5112    ///
5113    /// If `dst.len() != size_of_val(self)`, `write_to` returns `Err`.
5114    ///
5115    /// # Examples
5116    ///
5117    /// ```
5118    /// use zerocopy::IntoBytes;
5119    /// # use zerocopy_derive::*;
5120    ///
5121    /// #[derive(IntoBytes, Immutable)]
5122    /// #[repr(C)]
5123    /// struct PacketHeader {
5124    ///     src_port: [u8; 2],
5125    ///     dst_port: [u8; 2],
5126    ///     length: [u8; 2],
5127    ///     checksum: [u8; 2],
5128    /// }
5129    ///
5130    /// let header = PacketHeader {
5131    ///     src_port: [0, 1],
5132    ///     dst_port: [2, 3],
5133    ///     length: [4, 5],
5134    ///     checksum: [6, 7],
5135    /// };
5136    ///
5137    /// let mut bytes = [0, 0, 0, 0, 0, 0, 0, 0];
5138    ///
5139    /// header.write_to(&mut bytes[..]);
5140    ///
5141    /// assert_eq!(bytes, [0, 1, 2, 3, 4, 5, 6, 7]);
5142    /// ```
5143    ///
5144    /// If too many or too few target bytes are provided, `write_to` returns
5145    /// `Err` and leaves the target bytes unmodified:
5146    ///
5147    /// ```
5148    /// # use zerocopy::IntoBytes;
5149    /// # let header = u128::MAX;
5150    /// let mut excessive_bytes = &mut [0u8; 128][..];
5151    ///
5152    /// let write_result = header.write_to(excessive_bytes);
5153    ///
5154    /// assert!(write_result.is_err());
5155    /// assert_eq!(excessive_bytes, [0u8; 128]);
5156    /// ```
5157    #[must_use = "callers should check the return value to see if the operation succeeded"]
5158    #[inline]
5159    fn write_to(&self, dst: &mut [u8]) -> Result<(), SizeError<&Self, &mut [u8]>>
5160    where
5161        Self: Immutable,
5162    {
5163        let src = self.as_bytes();
5164        if dst.len() == src.len() {
5165            // SAFETY: Within this branch of the conditional, we have ensured
5166            // that `dst.len()` is equal to `src.len()`. Neither the size of the
5167            // source nor the size of the destination change between the above
5168            // size check and the invocation of `copy_unchecked`.
5169            unsafe { util::copy_unchecked(src, dst) }
5170            Ok(())
5171        } else {
5172            Err(SizeError::new(self))
5173        }
5174    }
5175
5176    /// Writes a copy of `self` to the prefix of `dst`.
5177    ///
5178    /// `write_to_prefix` writes `self` to the first `size_of_val(self)` bytes
5179    /// of `dst`. If `dst.len() < size_of_val(self)`, it returns `Err`.
5180    ///
5181    /// # Examples
5182    ///
5183    /// ```
5184    /// use zerocopy::IntoBytes;
5185    /// # use zerocopy_derive::*;
5186    ///
5187    /// #[derive(IntoBytes, Immutable)]
5188    /// #[repr(C)]
5189    /// struct PacketHeader {
5190    ///     src_port: [u8; 2],
5191    ///     dst_port: [u8; 2],
5192    ///     length: [u8; 2],
5193    ///     checksum: [u8; 2],
5194    /// }
5195    ///
5196    /// let header = PacketHeader {
5197    ///     src_port: [0, 1],
5198    ///     dst_port: [2, 3],
5199    ///     length: [4, 5],
5200    ///     checksum: [6, 7],
5201    /// };
5202    ///
5203    /// let mut bytes = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0];
5204    ///
5205    /// header.write_to_prefix(&mut bytes[..]);
5206    ///
5207    /// assert_eq!(bytes, [0, 1, 2, 3, 4, 5, 6, 7, 0, 0]);
5208    /// ```
5209    ///
5210    /// If insufficient target bytes are provided, `write_to_prefix` returns
5211    /// `Err` and leaves the target bytes unmodified:
5212    ///
5213    /// ```
5214    /// # use zerocopy::IntoBytes;
5215    /// # let header = u128::MAX;
5216    /// let mut insufficient_bytes = &mut [0, 0][..];
5217    ///
5218    /// let write_result = header.write_to_prefix(insufficient_bytes);
5219    ///
5220    /// assert!(write_result.is_err());
5221    /// assert_eq!(insufficient_bytes, [0, 0]);
5222    /// ```
5223    #[must_use = "callers should check the return value to see if the operation succeeded"]
5224    #[inline]
5225    fn write_to_prefix(&self, dst: &mut [u8]) -> Result<(), SizeError<&Self, &mut [u8]>>
5226    where
5227        Self: Immutable,
5228    {
5229        let src = self.as_bytes();
5230        match dst.get_mut(..src.len()) {
5231            Some(dst) => {
5232                // SAFETY: Within this branch of the `match`, we have ensured
5233                // through fallible subslicing that `dst.len()` is equal to
5234                // `src.len()`. Neither the size of the source nor the size of
5235                // the destination change between the above subslicing operation
5236                // and the invocation of `copy_unchecked`.
5237                unsafe { util::copy_unchecked(src, dst) }
5238                Ok(())
5239            }
5240            None => Err(SizeError::new(self)),
5241        }
5242    }
5243
5244    /// Writes a copy of `self` to the suffix of `dst`.
5245    ///
5246    /// `write_to_suffix` writes `self` to the last `size_of_val(self)` bytes of
5247    /// `dst`. If `dst.len() < size_of_val(self)`, it returns `Err`.
5248    ///
5249    /// # Examples
5250    ///
5251    /// ```
5252    /// use zerocopy::IntoBytes;
5253    /// # use zerocopy_derive::*;
5254    ///
5255    /// #[derive(IntoBytes, Immutable)]
5256    /// #[repr(C)]
5257    /// struct PacketHeader {
5258    ///     src_port: [u8; 2],
5259    ///     dst_port: [u8; 2],
5260    ///     length: [u8; 2],
5261    ///     checksum: [u8; 2],
5262    /// }
5263    ///
5264    /// let header = PacketHeader {
5265    ///     src_port: [0, 1],
5266    ///     dst_port: [2, 3],
5267    ///     length: [4, 5],
5268    ///     checksum: [6, 7],
5269    /// };
5270    ///
5271    /// let mut bytes = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0];
5272    ///
5273    /// header.write_to_suffix(&mut bytes[..]);
5274    ///
5275    /// assert_eq!(bytes, [0, 0, 0, 1, 2, 3, 4, 5, 6, 7]);
5283    /// ```
5284    ///
5285    /// If insufficient target bytes are provided, `write_to_suffix` returns
5286    /// `Err` and leaves the target bytes unmodified:
5287    ///
5288    /// ```
5289    /// # use zerocopy::IntoBytes;
5290    /// # let header = u128::MAX;
5291    /// let mut insufficient_bytes = &mut [0, 0][..];
5292    ///
5293    /// let write_result = header.write_to_suffix(insufficient_bytes);
5294    ///
5295    /// assert!(write_result.is_err());
5296    /// assert_eq!(insufficient_bytes, [0, 0]);
5297    /// ```
5298    #[must_use = "callers should check the return value to see if the operation succeeded"]
5299    #[inline]
5300    fn write_to_suffix(&self, dst: &mut [u8]) -> Result<(), SizeError<&Self, &mut [u8]>>
5301    where
5302        Self: Immutable,
5303    {
5304        let src = self.as_bytes();
5305        let start = if let Some(start) = dst.len().checked_sub(src.len()) {
5306            start
5307        } else {
5308            return Err(SizeError::new(self));
5309        };
5310        let dst = if let Some(dst) = dst.get_mut(start..) {
5311            dst
5312        } else {
5313            // get_mut() should never return None here. We return a `SizeError`
5314            // rather than .unwrap() because in the event the branch is not
5315            // optimized away, returning a value is generally lighter-weight
5316            // than panicking.
5317            return Err(SizeError::new(self));
5318        };
5319        // SAFETY: Through fallible subslicing of `dst`, we have ensured that
5320        // `dst.len()` is equal to `src.len()`. Neither the size of the source
5321        // nor the size of the destination change between the above subslicing
5322        // operation and the invocation of `copy_unchecked`.
5323        unsafe {
5324            util::copy_unchecked(src, dst);
5325        }
5326        Ok(())
5327    }
5328
5329    /// Writes a copy of `self` to an `io::Write`.
5330    ///
5331    /// This is a shorthand for `dst.write_all(self.as_bytes())`, and is useful
5332    /// for interfacing with operating system byte sinks (files, sockets, etc.).
5333    ///
5334    /// # Examples
5335    ///
5336    /// ```no_run
5337    /// use zerocopy::{byteorder::big_endian::U16, FromBytes, IntoBytes};
5338    /// use std::fs::File;
5339    /// # use zerocopy_derive::*;
5340    ///
5341    /// #[derive(FromBytes, IntoBytes, Immutable, KnownLayout)]
5342    /// #[repr(C, packed)]
5343    /// struct GrayscaleImage {
5344    ///     height: U16,
5345    ///     width: U16,
5346    ///     pixels: [U16],
5347    /// }
5348    ///
5349    /// let image = GrayscaleImage::ref_from_bytes(&[0, 0, 0, 0][..]).unwrap();
5350    /// let mut file = File::create("image.bin").unwrap();
5351    /// image.write_to_io(&mut file).unwrap();
5352    /// ```
5353    ///
5354    /// If the write fails, `write_to_io` returns `Err` and a partial write may
5355    /// have occurred; e.g.:
5356    ///
5357    /// ```
5358    /// # use zerocopy::IntoBytes;
5359    ///
5360    /// let src = u128::MAX;
5361    /// let mut dst = [0u8; 2];
5362    ///
5363    /// let write_result = src.write_to_io(&mut dst[..]);
5364    ///
5365    /// assert!(write_result.is_err());
5366    /// assert_eq!(dst, [255, 255]);
5367    /// ```
5368    #[cfg(feature = "std")]
5369    #[inline(always)]
5370    fn write_to_io<W>(&self, mut dst: W) -> io::Result<()>
5371    where
5372        Self: Immutable,
5373        W: io::Write,
5374    {
5375        dst.write_all(self.as_bytes())
5376    }
5377
5378    #[deprecated(since = "0.8.0", note = "`IntoBytes::as_bytes_mut` was renamed to `as_mut_bytes`")]
5379    #[doc(hidden)]
5380    #[inline]
5381    fn as_bytes_mut(&mut self) -> &mut [u8]
5382    where
5383        Self: FromBytes,
5384    {
5385        self.as_mut_bytes()
5386    }
5387}
5388
5389/// Analyzes whether a type is [`Unaligned`].
5390///
5391/// This derive analyzes, at compile time, whether the annotated type satisfies
5392/// the [safety conditions] of `Unaligned` and implements `Unaligned` if it is
5393/// sound to do so. This derive can be applied to structs, enums, and unions;
5394/// e.g.:
5395///
5396/// ```
5397/// # use zerocopy_derive::Unaligned;
5398/// #[derive(Unaligned)]
5399/// #[repr(C)]
5400/// struct MyStruct {
5401/// # /*
5402///     ...
5403/// # */
5404/// }
5405///
5406/// #[derive(Unaligned)]
5407/// #[repr(u8)]
5408/// enum MyEnum {
5409/// #   Variant0,
5410/// # /*
5411///     ...
5412/// # */
5413/// }
5414///
5415/// #[derive(Unaligned)]
5416/// #[repr(packed)]
5417/// union MyUnion {
5418/// #   variant: u8,
5419/// # /*
5420///     ...
5421/// # */
5422/// }
5423/// ```
5424///
5425/// # Analysis
5426///
5427/// *This section describes, roughly, the analysis performed by this derive to
5428/// determine whether it is sound to implement `Unaligned` for a given type.
5429/// Unless you are modifying the implementation of this derive, or attempting to
5430/// manually implement `Unaligned` for a type yourself, you don't need to read
5431/// this section.*
5432///
5433/// If a type has the following properties, then this derive can implement
5434/// `Unaligned` for that type:
5435///
5436/// - If the type is a struct or union:
5437///   - If `repr(align(N))` is provided, `N` must equal 1.
5438///   - If the type is `repr(C)` or `repr(transparent)`, all fields must be
5439///     [`Unaligned`].
5440///   - If the type is not `repr(C)` or `repr(transparent)`, it must be
5441///     `repr(packed)` or `repr(packed(1))`.
5442/// - If the type is an enum:
5443///   - If `repr(align(N))` is provided, `N` must equal 1.
5444///   - It must be a field-less enum (meaning that all variants have no fields).
5445///   - It must be `repr(i8)` or `repr(u8)`.
5446///
5447/// [safety conditions]: trait@Unaligned#safety
5448#[cfg(any(feature = "derive", test))]
5449#[cfg_attr(doc_cfg, doc(cfg(feature = "derive")))]
5450pub use zerocopy_derive::Unaligned;
5451
5452/// Types with no alignment requirement.
5453///
5454/// If `T: Unaligned`, then `align_of::<T>() == 1`.
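///
/// Because such types impose no alignment requirement, reference conversions
/// from byte slices (e.g., [`FromBytes::ref_from_prefix`]) can never fail due
/// to misalignment; a brief sketch:
///
/// ```
/// use zerocopy::FromBytes;
/// # use zerocopy_derive::*;
///
/// #[derive(FromBytes, KnownLayout, Immutable, Unaligned)]
/// #[repr(C)]
/// struct UdpPort([u8; 2]);
///
/// // `UdpPort` has alignment 1, so it can be parsed starting at any offset.
/// assert_eq!(core::mem::align_of::<UdpPort>(), 1);
///
/// let bytes = &[0, 0, 1, 2][..];
/// let (port, rest) = UdpPort::ref_from_prefix(&bytes[1..]).unwrap();
/// assert_eq!(port.0, [0, 1]);
/// assert_eq!(rest, &[2]);
/// ```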
5455///
5456/// # Implementation
5457///
5458/// **Do not implement this trait yourself!** Instead, use
5459/// [`#[derive(Unaligned)]`][derive]; e.g.:
5460///
5461/// ```
5462/// # use zerocopy_derive::Unaligned;
5463/// #[derive(Unaligned)]
5464/// #[repr(C)]
5465/// struct MyStruct {
5466/// # /*
5467///     ...
5468/// # */
5469/// }
5470///
5471/// #[derive(Unaligned)]
5472/// #[repr(u8)]
5473/// enum MyEnum {
5474/// #   Variant0,
5475/// # /*
5476///     ...
5477/// # */
5478/// }
5479///
5480/// #[derive(Unaligned)]
5481/// #[repr(packed)]
5482/// union MyUnion {
5483/// #   variant: u8,
5484/// # /*
5485///     ...
5486/// # */
5487/// }
5488/// ```
5489///
5490/// This derive performs a sophisticated, compile-time safety analysis to
5491/// determine whether a type is `Unaligned`.
5492///
5493/// # Safety
5494///
5495/// *This section describes what is required in order for `T: Unaligned`, and
5496/// what unsafe code may assume of such types. If you don't plan on implementing
5497/// `Unaligned` manually, and you don't plan on writing unsafe code that
5498/// operates on `Unaligned` types, then you don't need to read this section.*
5499///
5500/// If `T: Unaligned`, then unsafe code may assume that it is sound to produce a
5501/// reference to `T` at any memory location regardless of alignment. If a type
5502/// is marked as `Unaligned` which violates this contract, it may cause
5503/// undefined behavior.
5504///
5505/// `#[derive(Unaligned)]` only permits [types which satisfy these
5506/// requirements][derive-analysis].
5507///
5508#[cfg_attr(
5509    feature = "derive",
5510    doc = "[derive]: zerocopy_derive::Unaligned",
5511    doc = "[derive-analysis]: zerocopy_derive::Unaligned#analysis"
5512)]
5513#[cfg_attr(
5514    not(feature = "derive"),
5515    doc = concat!("[derive]: https://docs.rs/zerocopy/", env!("CARGO_PKG_VERSION"), "/zerocopy/derive.Unaligned.html"),
5516    doc = concat!("[derive-analysis]: https://docs.rs/zerocopy/", env!("CARGO_PKG_VERSION"), "/zerocopy/derive.Unaligned.html#analysis"),
5517)]
5518#[cfg_attr(
5519    zerocopy_diagnostic_on_unimplemented_1_78_0,
5520    diagnostic::on_unimplemented(note = "Consider adding `#[derive(Unaligned)]` to `{Self}`")
5521)]
5522pub unsafe trait Unaligned {
5523    // The `Self: Sized` bound makes it so that `Unaligned` is still object
5524    // safe.
5525    #[doc(hidden)]
5526    fn only_derive_is_allowed_to_implement_this_trait()
5527    where
5528        Self: Sized;
5529}
5530
5531/// Derives an optimized [`Hash`] implementation.
5532///
5533/// This derive can be applied to structs and enums implementing both
5534/// [`Immutable`] and [`IntoBytes`]; e.g.:
5535///
5536/// ```
5537/// # use zerocopy_derive::{ByteHash, Immutable, IntoBytes};
5538/// #[derive(ByteHash, Immutable, IntoBytes)]
5539/// #[repr(C)]
5540/// struct MyStruct {
5541/// # /*
5542///     ...
5543/// # */
5544/// }
5545///
5546/// #[derive(ByteHash, Immutable, IntoBytes)]
5547/// #[repr(u8)]
5548/// enum MyEnum {
5549/// #   Variant,
5550/// # /*
5551///     ...
5552/// # */
5553/// }
5554/// ```
5555///
5556/// The standard library's [`derive(Hash)`][derive@Hash] produces hashes by
5557/// individually hashing each field and combining the results. Instead, the
5558/// implementations of [`Hash::hash()`] and [`Hash::hash_slice()`] generated by
5559/// `derive(ByteHash)` convert the entirety of `self` to a byte slice and hash
5560/// it in a single call to [`Hasher::write()`]. This may have performance
5561/// advantages.
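///
/// For example, a type which derives `ByteHash` can be hashed with any hasher,
/// just like a type which derives the standard `Hash`; a brief sketch:
///
/// ```
/// use std::collections::hash_map::DefaultHasher;
/// use std::hash::{Hash, Hasher};
/// # use zerocopy_derive::{ByteHash, Immutable, IntoBytes};
///
/// #[derive(ByteHash, Immutable, IntoBytes)]
/// #[repr(C)]
/// struct Point {
///     x: u32,
///     y: u32,
/// }
///
/// fn hash_of(p: &Point) -> u64 {
///     let mut hasher = DefaultHasher::new();
///     p.hash(&mut hasher);
///     hasher.finish()
/// }
///
/// // Identical values have identical byte representations, and therefore
/// // identical hashes.
/// assert_eq!(hash_of(&Point { x: 1, y: 2 }), hash_of(&Point { x: 1, y: 2 }));
/// ```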
5562///
5563/// [`Hash`]: core::hash::Hash
5564/// [`Hash::hash()`]: core::hash::Hash::hash()
5565/// [`Hash::hash_slice()`]: core::hash::Hash::hash_slice()
/// [`Hasher::write()`]: core::hash::Hasher::write()
5566#[cfg(any(feature = "derive", test))]
5567#[cfg_attr(doc_cfg, doc(cfg(feature = "derive")))]
5568pub use zerocopy_derive::ByteHash;
5569
5570/// Derives optimized [`PartialEq`] and [`Eq`] implementations.
5571///
5572/// This derive can be applied to structs and enums implementing both
5573/// [`Immutable`] and [`IntoBytes`]; e.g.:
5574///
5575/// ```
5576/// # use zerocopy_derive::{ByteEq, Immutable, IntoBytes};
5577/// #[derive(ByteEq, Immutable, IntoBytes)]
5578/// #[repr(C)]
5579/// struct MyStruct {
5580/// # /*
5581///     ...
5582/// # */
5583/// }
5584///
5585/// #[derive(ByteEq, Immutable, IntoBytes)]
5586/// #[repr(u8)]
5587/// enum MyEnum {
5588/// #   Variant,
5589/// # /*
5590///     ...
5591/// # */
5592/// }
5593/// ```
5594///
5595/// The standard library's [`derive(Eq, PartialEq)`][derive@PartialEq] computes
5596/// equality by individually comparing each field. Instead, the implementation
5597/// of [`PartialEq::eq`] emitted by `derive(ByteEq)` converts the entirety of
5598/// `self` and `other` to byte slices and compares those slices for equality.
5599/// This may have performance advantages.
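///
/// For example, values of a type which derives `ByteEq` can be compared with
/// `==` and `!=` just like values of any other `PartialEq` type; a brief
/// sketch:
///
/// ```
/// # use zerocopy_derive::{ByteEq, Immutable, IntoBytes};
/// #[derive(ByteEq, Immutable, IntoBytes)]
/// #[repr(C)]
/// struct Point {
///     x: u32,
///     y: u32,
/// }
///
/// // Equality is computed by comparing the two values' byte representations.
/// assert!(Point { x: 1, y: 2 } == Point { x: 1, y: 2 });
/// assert!(Point { x: 1, y: 2 } != Point { x: 2, y: 1 });
/// ```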
5600#[cfg(any(feature = "derive", test))]
5601#[cfg_attr(doc_cfg, doc(cfg(feature = "derive")))]
5602pub use zerocopy_derive::ByteEq;
5603
5604/// Implements [`SplitAt`].
5605///
5606/// This derive can be applied to structs; e.g.:
5607///
5608/// ```
5609/// # use zerocopy_derive::{KnownLayout, SplitAt};
5610/// #[derive(SplitAt, KnownLayout)]
5611/// #[repr(C)]
5612/// struct MyStruct {
5613///     // Sized fields may precede the trailing slice.
5614///     length: u8,
5615///     trailing: [u8],
5616/// }
5617/// ```
5618#[cfg(any(feature = "derive", test))]
5619#[cfg_attr(doc_cfg, doc(cfg(feature = "derive")))]
5620pub use zerocopy_derive::SplitAt;
5621
5622#[cfg(feature = "alloc")]
5623#[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))]
5624#[cfg(zerocopy_panic_in_const_and_vec_try_reserve_1_57_0)]
5625mod alloc_support {
5626    use super::*;
5627
5628    /// Extends a `Vec<T>` by pushing `additional` new items onto the end of the
5629    /// vector. The new items are initialized with zeros.
5630    #[cfg(zerocopy_panic_in_const_and_vec_try_reserve_1_57_0)]
5631    #[doc(hidden)]
5632    #[deprecated(since = "0.8.0", note = "moved to `FromZeros`")]
5633    #[inline(always)]
5634    pub fn extend_vec_zeroed<T: FromZeros>(
5635        v: &mut Vec<T>,
5636        additional: usize,
5637    ) -> Result<(), AllocError> {
5638        <T as FromZeros>::extend_vec_zeroed(v, additional)
5639    }
5640
5641    /// Inserts `additional` new items into `Vec<T>` at `position`. The new
5642    /// items are initialized with zeros.
5643    ///
5644    /// # Panics
5645    ///
5646    /// Panics if `position > v.len()`.
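    ///
    /// A minimal usage sketch, written against the `FromZeros` method that
    /// this deprecated wrapper forwards to:
    ///
    /// ```ignore
    /// let mut v = vec![100u64, 200, 300];
    /// u64::insert_vec_zeroed(&mut v, 1, 1).unwrap();
    /// assert_eq!(&*v, &[100, 0, 200, 300]);
    /// ```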
5647    #[cfg(zerocopy_panic_in_const_and_vec_try_reserve_1_57_0)]
5648    #[doc(hidden)]
5649    #[deprecated(since = "0.8.0", note = "moved to `FromZeros`")]
5650    #[inline(always)]
5651    pub fn insert_vec_zeroed<T: FromZeros>(
5652        v: &mut Vec<T>,
5653        position: usize,
5654        additional: usize,
5655    ) -> Result<(), AllocError> {
5656        <T as FromZeros>::insert_vec_zeroed(v, position, additional)
5657    }
5658}
5659
5660#[cfg(feature = "alloc")]
5661#[cfg(zerocopy_panic_in_const_and_vec_try_reserve_1_57_0)]
5662#[doc(hidden)]
5663pub use alloc_support::*;
5664
5665#[cfg(test)]
5666#[allow(clippy::assertions_on_result_states, clippy::unreadable_literal)]
5667mod tests {
5668    use static_assertions::assert_impl_all;
5669
5670    use super::*;
5671    use crate::util::testutil::*;
5672
5673    // An unsized type.
5674    //
5675    // This is used to test the custom derives of our traits. The `[u8]` type
5676    // gets a hand-rolled impl, so it doesn't exercise our custom derives.
5677    #[derive(Debug, Eq, PartialEq, FromBytes, IntoBytes, Unaligned, Immutable)]
5678    #[repr(transparent)]
5679    struct Unsized([u8]);
5680
5681    impl Unsized {
5682        fn from_mut_slice(slc: &mut [u8]) -> &mut Unsized {
5683            // SAFETY: This is *probably* sound - since the layouts of `[u8]` and
5684            // `Unsized` are the same, so are the layouts of `&mut [u8]` and
5685            // `&mut Unsized`. [1] Even if it turns out that this isn't actually
5686            // guaranteed by the language spec, we can just change this since
5687            // it's in test code.
5688            //
5689            // [1] https://github.com/rust-lang/unsafe-code-guidelines/issues/375
5690            unsafe { mem::transmute(slc) }
5691        }
5692    }
5693
5694    #[test]
5695    fn test_known_layout() {
5696        // Test that `$ty` and `ManuallyDrop<$ty>` have the expected layout.
5697        // Test that `PhantomData<$ty>` has the same layout as `()` regardless
5698        // of `$ty`.
5699        macro_rules! test {
5700            ($ty:ty, $expect:expr) => {
5701                let expect = $expect;
5702                assert_eq!(<$ty as KnownLayout>::LAYOUT, expect);
5703                assert_eq!(<ManuallyDrop<$ty> as KnownLayout>::LAYOUT, expect);
5704                assert_eq!(<PhantomData<$ty> as KnownLayout>::LAYOUT, <() as KnownLayout>::LAYOUT);
5705            };
5706        }
5707
5708        let layout = |offset, align, _trailing_slice_elem_size| DstLayout {
5709            align: NonZeroUsize::new(align).unwrap(),
5710            size_info: match _trailing_slice_elem_size {
5711                None => SizeInfo::Sized { size: offset },
5712                Some(elem_size) => SizeInfo::SliceDst(TrailingSliceLayout { offset, elem_size }),
5713            },
5714        };
5715
5716        test!((), layout(0, 1, None));
5717        test!(u8, layout(1, 1, None));
5718        // Use `align_of` because `u64` alignment may be smaller than 8 on some
5719        // platforms.
5720        test!(u64, layout(8, mem::align_of::<u64>(), None));
5721        test!(AU64, layout(8, 8, None));
5722
5723        test!(Option<&'static ()>, usize::LAYOUT);
5724
5725        test!([()], layout(0, 1, Some(0)));
5726        test!([u8], layout(0, 1, Some(1)));
5727        test!(str, layout(0, 1, Some(1)));
5728    }
5729
5730    #[cfg(feature = "derive")]
5731    #[test]
5732    fn test_known_layout_derive() {
5733        // In this and other files (`late_compile_pass.rs`,
5734        // `mid_compile_pass.rs`, and `struct.rs`), we test success and failure
5735        // modes of `derive(KnownLayout)` for the following combination of
5736        // properties:
5737        //
5738        // +------------+--------------------------------------+-----------+
5739        // |            |      trailing field properties       |           |
5740        // | `repr(C)`? | generic? | `KnownLayout`? | `Sized`? | Type Name |
5741        // |------------+----------+----------------+----------+-----------|
5742        // |          N |        N |              N |        N |      KL00 |
5743        // |          N |        N |              N |        Y |      KL01 |
5744        // |          N |        N |              Y |        N |      KL02 |
5745        // |          N |        N |              Y |        Y |      KL03 |
5746        // |          N |        Y |              N |        N |      KL04 |
5747        // |          N |        Y |              N |        Y |      KL05 |
5748        // |          N |        Y |              Y |        N |      KL06 |
5749        // |          N |        Y |              Y |        Y |      KL07 |
5750        // |          Y |        N |              N |        N |      KL08 |
5751        // |          Y |        N |              N |        Y |      KL09 |
5752        // |          Y |        N |              Y |        N |      KL10 |
5753        // |          Y |        N |              Y |        Y |      KL11 |
5754        // |          Y |        Y |              N |        N |      KL12 |
5755        // |          Y |        Y |              N |        Y |      KL13 |
5756        // |          Y |        Y |              Y |        N |      KL14 |
5757        // |          Y |        Y |              Y |        Y |      KL15 |
5758        // +------------+----------+----------------+----------+-----------+
5759
5760        struct NotKnownLayout<T = ()> {
5761            _t: T,
5762        }
5763
5764        #[derive(KnownLayout)]
5765        #[repr(C)]
5766        struct AlignSize<const ALIGN: usize, const SIZE: usize>
5767        where
5768            elain::Align<ALIGN>: elain::Alignment,
5769        {
5770            _align: elain::Align<ALIGN>,
5771            size: [u8; SIZE],
5772        }
5773
5774        type AU16 = AlignSize<2, 2>;
5775        type AU32 = AlignSize<4, 4>;
5776
5777        fn _assert_kl<T: ?Sized + KnownLayout>(_: &T) {}
5778
5779        let sized_layout = |align, size| DstLayout {
5780            align: NonZeroUsize::new(align).unwrap(),
5781            size_info: SizeInfo::Sized { size },
5782        };
5783
5784        let unsized_layout = |align, elem_size, offset| DstLayout {
5785            align: NonZeroUsize::new(align).unwrap(),
5786            size_info: SizeInfo::SliceDst(TrailingSliceLayout { offset, elem_size }),
5787        };
5788
5789        // | `repr(C)`? | generic? | `KnownLayout`? | `Sized`? | Type Name |
5790        // |          N |        N |              N |        Y |      KL01 |
5791        #[allow(dead_code)]
5792        #[derive(KnownLayout)]
5793        struct KL01(NotKnownLayout<AU32>, NotKnownLayout<AU16>);
5794
5795        let expected = DstLayout::for_type::<KL01>();
5796
5797        assert_eq!(<KL01 as KnownLayout>::LAYOUT, expected);
5798        assert_eq!(<KL01 as KnownLayout>::LAYOUT, sized_layout(4, 8));
5799
5800        // ...with `align(N)`:
5801        #[allow(dead_code)]
5802        #[derive(KnownLayout)]
5803        #[repr(align(64))]
5804        struct KL01Align(NotKnownLayout<AU32>, NotKnownLayout<AU16>);
5805
5806        let expected = DstLayout::for_type::<KL01Align>();
5807
5808        assert_eq!(<KL01Align as KnownLayout>::LAYOUT, expected);
5809        assert_eq!(<KL01Align as KnownLayout>::LAYOUT, sized_layout(64, 64));
5810
5811        // ...with `packed`:
5812        #[allow(dead_code)]
5813        #[derive(KnownLayout)]
5814        #[repr(packed)]
5815        struct KL01Packed(NotKnownLayout<AU32>, NotKnownLayout<AU16>);
5816
5817        let expected = DstLayout::for_type::<KL01Packed>();
5818
5819        assert_eq!(<KL01Packed as KnownLayout>::LAYOUT, expected);
5820        assert_eq!(<KL01Packed as KnownLayout>::LAYOUT, sized_layout(1, 6));
5821
5822        // ...with `packed(N)`:
5823        #[allow(dead_code)]
5824        #[derive(KnownLayout)]
5825        #[repr(packed(2))]
5826        struct KL01PackedN(NotKnownLayout<AU32>, NotKnownLayout<AU16>);
5827
5828        assert_impl_all!(KL01PackedN: KnownLayout);
5829
5830        let expected = DstLayout::for_type::<KL01PackedN>();
5831
5832        assert_eq!(<KL01PackedN as KnownLayout>::LAYOUT, expected);
5833        assert_eq!(<KL01PackedN as KnownLayout>::LAYOUT, sized_layout(2, 6));
5834
5835        // | `repr(C)`? | generic? | `KnownLayout`? | `Sized`? | Type Name |
5836        // |          N |        N |              Y |        Y |      KL03 |
5837        #[allow(dead_code)]
5838        #[derive(KnownLayout)]
5839        struct KL03(NotKnownLayout, u8);
5840
5841        let expected = DstLayout::for_type::<KL03>();
5842
5843        assert_eq!(<KL03 as KnownLayout>::LAYOUT, expected);
5844        assert_eq!(<KL03 as KnownLayout>::LAYOUT, sized_layout(1, 1));
5845
5846        // ... with `align(N)`
5847        #[allow(dead_code)]
5848        #[derive(KnownLayout)]
5849        #[repr(align(64))]
5850        struct KL03Align(NotKnownLayout<AU32>, u8);
5851
5852        let expected = DstLayout::for_type::<KL03Align>();
5853
5854        assert_eq!(<KL03Align as KnownLayout>::LAYOUT, expected);
5855        assert_eq!(<KL03Align as KnownLayout>::LAYOUT, sized_layout(64, 64));
5856
5857        // ... with `packed`:
5858        #[allow(dead_code)]
5859        #[derive(KnownLayout)]
5860        #[repr(packed)]
5861        struct KL03Packed(NotKnownLayout<AU32>, u8);
5862
5863        let expected = DstLayout::for_type::<KL03Packed>();
5864
5865        assert_eq!(<KL03Packed as KnownLayout>::LAYOUT, expected);
5866        assert_eq!(<KL03Packed as KnownLayout>::LAYOUT, sized_layout(1, 5));
5867
5868        // ... with `packed(N)`
5869        #[allow(dead_code)]
5870        #[derive(KnownLayout)]
5871        #[repr(packed(2))]
5872        struct KL03PackedN(NotKnownLayout<AU32>, u8);
5873
5874        assert_impl_all!(KL03PackedN: KnownLayout);
5875
5876        let expected = DstLayout::for_type::<KL03PackedN>();
5877
5878        assert_eq!(<KL03PackedN as KnownLayout>::LAYOUT, expected);
5879        assert_eq!(<KL03PackedN as KnownLayout>::LAYOUT, sized_layout(2, 6));
5880
5881        // | `repr(C)`? | generic? | `KnownLayout`? | `Sized`? | Type Name |
5882        // |          N |        Y |              N |        Y |      KL05 |
5883        #[allow(dead_code)]
5884        #[derive(KnownLayout)]
5885        struct KL05<T>(u8, T);
5886
5887        fn _test_kl05<T>(t: T) -> impl KnownLayout {
5888            KL05(0u8, t)
5889        }
5890
5891        // | `repr(C)`? | generic? | `KnownLayout`? | `Sized`? | Type Name |
5892        // |          N |        Y |              Y |        Y |      KL07 |
5893        #[allow(dead_code)]
5894        #[derive(KnownLayout)]
5895        struct KL07<T: KnownLayout>(u8, T);
5896
5897        fn _test_kl07<T: KnownLayout>(t: T) -> impl KnownLayout {
5898            let _ = KL07(0u8, t);
5899        }
5900
5901        // | `repr(C)`? | generic? | `KnownLayout`? | `Sized`? | Type Name |
5902        // |          Y |        N |              Y |        N |      KL10 |
5903        #[allow(dead_code)]
5904        #[derive(KnownLayout)]
5905        #[repr(C)]
5906        struct KL10(NotKnownLayout<AU32>, [u8]);
5907
5908        let expected = DstLayout::new_zst(None)
5909            .extend(DstLayout::for_type::<NotKnownLayout<AU32>>(), None)
5910            .extend(<[u8] as KnownLayout>::LAYOUT, None)
5911            .pad_to_align();
5912
5913        assert_eq!(<KL10 as KnownLayout>::LAYOUT, expected);
5914        assert_eq!(<KL10 as KnownLayout>::LAYOUT, unsized_layout(4, 1, 4));
5915
5916        // ...with `align(N)`:
5917        #[allow(dead_code)]
5918        #[derive(KnownLayout)]
5919        #[repr(C, align(64))]
5920        struct KL10Align(NotKnownLayout<AU32>, [u8]);
5921
5922        let repr_align = NonZeroUsize::new(64);
5923
5924        let expected = DstLayout::new_zst(repr_align)
5925            .extend(DstLayout::for_type::<NotKnownLayout<AU32>>(), None)
5926            .extend(<[u8] as KnownLayout>::LAYOUT, None)
5927            .pad_to_align();
5928
5929        assert_eq!(<KL10Align as KnownLayout>::LAYOUT, expected);
5930        assert_eq!(<KL10Align as KnownLayout>::LAYOUT, unsized_layout(64, 1, 4));
5931
5932        // ...with `packed`:
5933        #[allow(dead_code)]
5934        #[derive(KnownLayout)]
5935        #[repr(C, packed)]
5936        struct KL10Packed(NotKnownLayout<AU32>, [u8]);
5937
5938        let repr_packed = NonZeroUsize::new(1);
5939
5940        let expected = DstLayout::new_zst(None)
5941            .extend(DstLayout::for_type::<NotKnownLayout<AU32>>(), repr_packed)
5942            .extend(<[u8] as KnownLayout>::LAYOUT, repr_packed)
5943            .pad_to_align();
5944
5945        assert_eq!(<KL10Packed as KnownLayout>::LAYOUT, expected);
5946        assert_eq!(<KL10Packed as KnownLayout>::LAYOUT, unsized_layout(1, 1, 4));
5947
5948        // ...with `packed(N)`:
5949        #[allow(dead_code)]
5950        #[derive(KnownLayout)]
5951        #[repr(C, packed(2))]
5952        struct KL10PackedN(NotKnownLayout<AU32>, [u8]);
5953
5954        let repr_packed = NonZeroUsize::new(2);
5955
5956        let expected = DstLayout::new_zst(None)
5957            .extend(DstLayout::for_type::<NotKnownLayout<AU32>>(), repr_packed)
5958            .extend(<[u8] as KnownLayout>::LAYOUT, repr_packed)
5959            .pad_to_align();
5960
5961        assert_eq!(<KL10PackedN as KnownLayout>::LAYOUT, expected);
5962        assert_eq!(<KL10PackedN as KnownLayout>::LAYOUT, unsized_layout(2, 1, 4));
5963
5964        // | `repr(C)`? | generic? | `KnownLayout`? | `Sized`? | Type Name |
5965        // |          Y |        N |              Y |        Y |      KL11 |
5966        #[allow(dead_code)]
5967        #[derive(KnownLayout)]
5968        #[repr(C)]
5969        struct KL11(NotKnownLayout<AU64>, u8);
5970
5971        let expected = DstLayout::new_zst(None)
5972            .extend(DstLayout::for_type::<NotKnownLayout<AU64>>(), None)
5973            .extend(<u8 as KnownLayout>::LAYOUT, None)
5974            .pad_to_align();
5975
5976        assert_eq!(<KL11 as KnownLayout>::LAYOUT, expected);
5977        assert_eq!(<KL11 as KnownLayout>::LAYOUT, sized_layout(8, 16));
5978
5979        // ...with `align(N)`:
5980        #[allow(dead_code)]
5981        #[derive(KnownLayout)]
5982        #[repr(C, align(64))]
5983        struct KL11Align(NotKnownLayout<AU64>, u8);
5984
5985        let repr_align = NonZeroUsize::new(64);
5986
5987        let expected = DstLayout::new_zst(repr_align)
5988            .extend(DstLayout::for_type::<NotKnownLayout<AU64>>(), None)
5989            .extend(<u8 as KnownLayout>::LAYOUT, None)
5990            .pad_to_align();
5991
5992        assert_eq!(<KL11Align as KnownLayout>::LAYOUT, expected);
5993        assert_eq!(<KL11Align as KnownLayout>::LAYOUT, sized_layout(64, 64));
5994
5995        // ...with `packed`:
5996        #[allow(dead_code)]
5997        #[derive(KnownLayout)]
5998        #[repr(C, packed)]
5999        struct KL11Packed(NotKnownLayout<AU64>, u8);
6000
6001        let repr_packed = NonZeroUsize::new(1);
6002
6003        let expected = DstLayout::new_zst(None)
6004            .extend(DstLayout::for_type::<NotKnownLayout<AU64>>(), repr_packed)
6005            .extend(<u8 as KnownLayout>::LAYOUT, repr_packed)
6006            .pad_to_align();
6007
6008        assert_eq!(<KL11Packed as KnownLayout>::LAYOUT, expected);
6009        assert_eq!(<KL11Packed as KnownLayout>::LAYOUT, sized_layout(1, 9));
6010
6011        // ...with `packed(N)`:
6012        #[allow(dead_code)]
6013        #[derive(KnownLayout)]
6014        #[repr(C, packed(2))]
6015        struct KL11PackedN(NotKnownLayout<AU64>, u8);
6016
6017        let repr_packed = NonZeroUsize::new(2);
6018
6019        let expected = DstLayout::new_zst(None)
6020            .extend(DstLayout::for_type::<NotKnownLayout<AU64>>(), repr_packed)
6021            .extend(<u8 as KnownLayout>::LAYOUT, repr_packed)
6022            .pad_to_align();
6023
6024        assert_eq!(<KL11PackedN as KnownLayout>::LAYOUT, expected);
6025        assert_eq!(<KL11PackedN as KnownLayout>::LAYOUT, sized_layout(2, 10));
6026
6027        // | `repr(C)`? | generic? | `KnownLayout`? | `Sized`? | Type Name |
6028        // |          Y |        Y |              Y |        N |      KL14 |
6029        #[allow(dead_code)]
6030        #[derive(KnownLayout)]
6031        #[repr(C)]
6032        struct KL14<T: ?Sized + KnownLayout>(u8, T);
6033
6034        fn _test_kl14<T: ?Sized + KnownLayout>(kl: &KL14<T>) {
6035            _assert_kl(kl)
6036        }
6037
6038        // | `repr(C)`? | generic? | `KnownLayout`? | `Sized`? | Type Name |
6039        // |          Y |        Y |              Y |        Y |      KL15 |
6040        #[allow(dead_code)]
6041        #[derive(KnownLayout)]
6042        #[repr(C)]
6043        struct KL15<T: KnownLayout>(u8, T);
6044
6045        fn _test_kl15<T: KnownLayout>(t: T) -> impl KnownLayout {
6046            let _ = KL15(0u8, t);
6047        }
6048
6049        // Test a variety of combinations of field types:
6050        //  - ()
6051        //  - u8
6052        //  - AU16
6053        //  - [()]
6054        //  - [u8]
6055        //  - [AU16]
6056
6057        #[allow(clippy::upper_case_acronyms, dead_code)]
6058        #[derive(KnownLayout)]
6059        #[repr(C)]
6060        struct KLTU<T, U: ?Sized>(T, U);
6061
6062        assert_eq!(<KLTU<(), ()> as KnownLayout>::LAYOUT, sized_layout(1, 0));
6063
6064        assert_eq!(<KLTU<(), u8> as KnownLayout>::LAYOUT, sized_layout(1, 1));
6065
6066        assert_eq!(<KLTU<(), AU16> as KnownLayout>::LAYOUT, sized_layout(2, 2));
6067
6068        assert_eq!(<KLTU<(), [()]> as KnownLayout>::LAYOUT, unsized_layout(1, 0, 0));
6069
6070        assert_eq!(<KLTU<(), [u8]> as KnownLayout>::LAYOUT, unsized_layout(1, 1, 0));
6071
6072        assert_eq!(<KLTU<(), [AU16]> as KnownLayout>::LAYOUT, unsized_layout(2, 2, 0));
6073
6074        assert_eq!(<KLTU<u8, ()> as KnownLayout>::LAYOUT, sized_layout(1, 1));
6075
6076        assert_eq!(<KLTU<u8, u8> as KnownLayout>::LAYOUT, sized_layout(1, 2));
6077
6078        assert_eq!(<KLTU<u8, AU16> as KnownLayout>::LAYOUT, sized_layout(2, 4));
6079
6080        assert_eq!(<KLTU<u8, [()]> as KnownLayout>::LAYOUT, unsized_layout(1, 0, 1));
6081
6082        assert_eq!(<KLTU<u8, [u8]> as KnownLayout>::LAYOUT, unsized_layout(1, 1, 1));
6083
6084        assert_eq!(<KLTU<u8, [AU16]> as KnownLayout>::LAYOUT, unsized_layout(2, 2, 2));
6085
6086        assert_eq!(<KLTU<AU16, ()> as KnownLayout>::LAYOUT, sized_layout(2, 2));
6087
6088        assert_eq!(<KLTU<AU16, u8> as KnownLayout>::LAYOUT, sized_layout(2, 4));
6089
6090        assert_eq!(<KLTU<AU16, AU16> as KnownLayout>::LAYOUT, sized_layout(2, 4));
6091
6092        assert_eq!(<KLTU<AU16, [()]> as KnownLayout>::LAYOUT, unsized_layout(2, 0, 2));
6093
6094        assert_eq!(<KLTU<AU16, [u8]> as KnownLayout>::LAYOUT, unsized_layout(2, 1, 2));
6095
6096        assert_eq!(<KLTU<AU16, [AU16]> as KnownLayout>::LAYOUT, unsized_layout(2, 2, 2));
6097
6098        // Test a variety of field counts.
6099
6100        #[derive(KnownLayout)]
6101        #[repr(C)]
6102        struct KLF0;
6103
6104        assert_eq!(<KLF0 as KnownLayout>::LAYOUT, sized_layout(1, 0));
6105
6106        #[derive(KnownLayout)]
6107        #[repr(C)]
6108        struct KLF1([u8]);
6109
6110        assert_eq!(<KLF1 as KnownLayout>::LAYOUT, unsized_layout(1, 1, 0));
6111
6112        #[derive(KnownLayout)]
6113        #[repr(C)]
6114        struct KLF2(NotKnownLayout<u8>, [u8]);
6115
6116        assert_eq!(<KLF2 as KnownLayout>::LAYOUT, unsized_layout(1, 1, 1));
6117
6118        #[derive(KnownLayout)]
6119        #[repr(C)]
6120        struct KLF3(NotKnownLayout<u8>, NotKnownLayout<AU16>, [u8]);
6121
6122        assert_eq!(<KLF3 as KnownLayout>::LAYOUT, unsized_layout(2, 1, 4));
6123
6124        #[derive(KnownLayout)]
6125        #[repr(C)]
6126        struct KLF4(NotKnownLayout<u8>, NotKnownLayout<AU16>, NotKnownLayout<AU32>, [u8]);
6127
6128        assert_eq!(<KLF4 as KnownLayout>::LAYOUT, unsized_layout(4, 1, 8));
6129    }
6130
6131    #[test]
6132    fn test_object_safety() {
6133        fn _takes_no_cell(_: &dyn Immutable) {}
6134        fn _takes_unaligned(_: &dyn Unaligned) {}
6135    }
6136
6137    #[test]
6138    fn test_from_zeros_only() {
6139        // Test types that implement `FromZeros` but not `FromBytes`.
6140
6141        assert!(!bool::new_zeroed());
6142        assert_eq!(char::new_zeroed(), '\0');
6143
6144        #[cfg(feature = "alloc")]
6145        {
6146            assert_eq!(bool::new_box_zeroed(), Ok(Box::new(false)));
6147            assert_eq!(char::new_box_zeroed(), Ok(Box::new('\0')));
6148
6149            assert_eq!(
6150                <[bool]>::new_box_zeroed_with_elems(3).unwrap().as_ref(),
6151                [false, false, false]
6152            );
6153            assert_eq!(
6154                <[char]>::new_box_zeroed_with_elems(3).unwrap().as_ref(),
6155                ['\0', '\0', '\0']
6156            );
6157
6158            assert_eq!(bool::new_vec_zeroed(3).unwrap().as_ref(), [false, false, false]);
6159            assert_eq!(char::new_vec_zeroed(3).unwrap().as_ref(), ['\0', '\0', '\0']);
6160        }
6161
6162        let mut string = "hello".to_string();
6163        let s: &mut str = string.as_mut();
6164        assert_eq!(s, "hello");
6165        s.zero();
6166        assert_eq!(s, "\0\0\0\0\0");
6167    }
6168
6169    #[test]
6170    fn test_zst_count_preserved() {
6171        // Test that, when an explicit count is provided for a type with a
6172        // ZST trailing slice element, that count is preserved. This is
6173        // important since, for such types, all element counts result in objects
6174        // of the same size, and so the correct behavior is ambiguous. However,
6175        // preserving the count as requested by the user is the behavior that we
6176        // document publicly.
6177
6178        // FromZeros methods
6179        #[cfg(feature = "alloc")]
6180        assert_eq!(<[()]>::new_box_zeroed_with_elems(3).unwrap().len(), 3);
6181        #[cfg(feature = "alloc")]
6182        assert_eq!(<()>::new_vec_zeroed(3).unwrap().len(), 3);
6183
6184        // FromBytes methods
6185        assert_eq!(<[()]>::ref_from_bytes_with_elems(&[][..], 3).unwrap().len(), 3);
6186        assert_eq!(<[()]>::ref_from_prefix_with_elems(&[][..], 3).unwrap().0.len(), 3);
6187        assert_eq!(<[()]>::ref_from_suffix_with_elems(&[][..], 3).unwrap().1.len(), 3);
6188        assert_eq!(<[()]>::mut_from_bytes_with_elems(&mut [][..], 3).unwrap().len(), 3);
6189        assert_eq!(<[()]>::mut_from_prefix_with_elems(&mut [][..], 3).unwrap().0.len(), 3);
6190        assert_eq!(<[()]>::mut_from_suffix_with_elems(&mut [][..], 3).unwrap().1.len(), 3);
6191    }
6192
6193    #[test]
6194    fn test_read_write() {
6195        const VAL: u64 = 0x12345678;
6196        #[cfg(target_endian = "big")]
6197        const VAL_BYTES: [u8; 8] = VAL.to_be_bytes();
6198        #[cfg(target_endian = "little")]
6199        const VAL_BYTES: [u8; 8] = VAL.to_le_bytes();
6200        const ZEROS: [u8; 8] = [0u8; 8];
6201
6202        // Test `FromBytes::{read_from, read_from_prefix, read_from_suffix}`.
6203
6204        assert_eq!(u64::read_from_bytes(&VAL_BYTES[..]), Ok(VAL));
6205        // The first 8 bytes are from `VAL_BYTES` and the second 8 bytes are all
6206        // zeros.
6207        let bytes_with_prefix: [u8; 16] = transmute!([VAL_BYTES, [0; 8]]);
6208        assert_eq!(u64::read_from_prefix(&bytes_with_prefix[..]), Ok((VAL, &ZEROS[..])));
6209        assert_eq!(u64::read_from_suffix(&bytes_with_prefix[..]), Ok((&VAL_BYTES[..], 0)));
6210        // The first 8 bytes are all zeros and the second 8 bytes are from
6211        // `VAL_BYTES`
6212        let bytes_with_suffix: [u8; 16] = transmute!([[0; 8], VAL_BYTES]);
6213        assert_eq!(u64::read_from_prefix(&bytes_with_suffix[..]), Ok((0, &VAL_BYTES[..])));
6214        assert_eq!(u64::read_from_suffix(&bytes_with_suffix[..]), Ok((&ZEROS[..], VAL)));
6215
6216        // Test `IntoBytes::{write_to, write_to_prefix, write_to_suffix}`.
6217
6218        let mut bytes = [0u8; 8];
6219        assert_eq!(VAL.write_to(&mut bytes[..]), Ok(()));
6220        assert_eq!(bytes, VAL_BYTES);
6221        let mut bytes = [0u8; 16];
6222        assert_eq!(VAL.write_to_prefix(&mut bytes[..]), Ok(()));
6223        let want: [u8; 16] = transmute!([VAL_BYTES, [0; 8]]);
6224        assert_eq!(bytes, want);
6225        let mut bytes = [0u8; 16];
6226        assert_eq!(VAL.write_to_suffix(&mut bytes[..]), Ok(()));
6227        let want: [u8; 16] = transmute!([[0; 8], VAL_BYTES]);
6228        assert_eq!(bytes, want);
6229    }
6230
6231    #[test]
6232    #[cfg(feature = "std")]
6233    fn test_read_io_with_padding_soundness() {
6234        // This test is designed to exhibit potential UB in
6235        // `FromBytes::read_from_io`. (see #2319, #2320).
6236
6237        // On most platforms (where `align_of::<u16>() == 2`), `WithPadding`
6238        // will have inter-field padding between `x` and `y`.
6239        #[derive(FromBytes)]
6240        #[repr(C)]
6241        struct WithPadding {
6242            x: u8,
6243            y: u16,
6244        }
6245        struct ReadsInRead;
6246        impl std::io::Read for ReadsInRead {
6247            fn read(&mut self, buf: &mut [u8]) -> std::io::Result<usize> {
6248                // This body branches on every byte of `buf`, ensuring that it
6249                // exhibits UB if any byte of `buf` is uninitialized.
6250                if buf.iter().all(|&x| x == 0) {
6251                    Ok(buf.len())
6252                } else {
6253                    buf.iter_mut().for_each(|x| *x = 0);
6254                    Ok(buf.len())
6255                }
6256            }
6257        }
6258        assert!(matches!(WithPadding::read_from_io(ReadsInRead), Ok(WithPadding { x: 0, y: 0 })));
6259    }
6260
6261    #[test]
6262    #[cfg(feature = "std")]
6263    fn test_read_write_io() {
6264        let mut long_buffer = [0, 0, 0, 0];
6265        assert!(matches!(u16::MAX.write_to_io(&mut long_buffer[..]), Ok(())));
6266        assert_eq!(long_buffer, [255, 255, 0, 0]);
6267        assert!(matches!(u16::read_from_io(&long_buffer[..]), Ok(u16::MAX)));
6268
6269        let mut short_buffer = [0, 0];
6270        assert!(u32::MAX.write_to_io(&mut short_buffer[..]).is_err());
6271        assert_eq!(short_buffer, [255, 255]);
6272        assert!(u32::read_from_io(&short_buffer[..]).is_err());
6273    }
6274
6275    #[test]
6276    fn test_try_from_bytes_try_read_from() {
6277        assert_eq!(<bool as TryFromBytes>::try_read_from_bytes(&[0]), Ok(false));
6278        assert_eq!(<bool as TryFromBytes>::try_read_from_bytes(&[1]), Ok(true));
6279
6280        assert_eq!(<bool as TryFromBytes>::try_read_from_prefix(&[0, 2]), Ok((false, &[2][..])));
6281        assert_eq!(<bool as TryFromBytes>::try_read_from_prefix(&[1, 2]), Ok((true, &[2][..])));
6282
6283        assert_eq!(<bool as TryFromBytes>::try_read_from_suffix(&[2, 0]), Ok((&[2][..], false)));
6284        assert_eq!(<bool as TryFromBytes>::try_read_from_suffix(&[2, 1]), Ok((&[2][..], true)));
6285
6286        // If we don't pass enough bytes, it fails.
6287        assert!(matches!(
6288            <u8 as TryFromBytes>::try_read_from_bytes(&[]),
6289            Err(TryReadError::Size(_))
6290        ));
6291        assert!(matches!(
6292            <u8 as TryFromBytes>::try_read_from_prefix(&[]),
6293            Err(TryReadError::Size(_))
6294        ));
6295        assert!(matches!(
6296            <u8 as TryFromBytes>::try_read_from_suffix(&[]),
6297            Err(TryReadError::Size(_))
6298        ));
6299
6300        // If we pass too many bytes, it fails.
6301        assert!(matches!(
6302            <u8 as TryFromBytes>::try_read_from_bytes(&[0, 0]),
6303            Err(TryReadError::Size(_))
6304        ));
6305
6306        // If we pass an invalid value, it fails.
6307        assert!(matches!(
6308            <bool as TryFromBytes>::try_read_from_bytes(&[2]),
6309            Err(TryReadError::Validity(_))
6310        ));
6311        assert!(matches!(
6312            <bool as TryFromBytes>::try_read_from_prefix(&[2, 0]),
6313            Err(TryReadError::Validity(_))
6314        ));
6315        assert!(matches!(
6316            <bool as TryFromBytes>::try_read_from_suffix(&[0, 2]),
6317            Err(TryReadError::Validity(_))
6318        ));
6319
6320        // Reading from a misaligned buffer should still succeed. Since `AU64`'s
6321        // alignment is 8, and since we read from two adjacent addresses one
6322        // byte apart, it is guaranteed that at least one of them (though
6323        // possibly both) will be misaligned.
6324        let bytes: [u8; 9] = [0, 0, 0, 0, 0, 0, 0, 0, 0];
6325        assert_eq!(<AU64 as TryFromBytes>::try_read_from_bytes(&bytes[..8]), Ok(AU64(0)));
6326        assert_eq!(<AU64 as TryFromBytes>::try_read_from_bytes(&bytes[1..9]), Ok(AU64(0)));
6327
6328        assert_eq!(
6329            <AU64 as TryFromBytes>::try_read_from_prefix(&bytes[..8]),
6330            Ok((AU64(0), &[][..]))
6331        );
6332        assert_eq!(
6333            <AU64 as TryFromBytes>::try_read_from_prefix(&bytes[1..9]),
6334            Ok((AU64(0), &[][..]))
6335        );
6336
6337        assert_eq!(
6338            <AU64 as TryFromBytes>::try_read_from_suffix(&bytes[..8]),
6339            Ok((&[][..], AU64(0)))
6340        );
6341        assert_eq!(
6342            <AU64 as TryFromBytes>::try_read_from_suffix(&bytes[1..9]),
6343            Ok((&[][..], AU64(0)))
6344        );
6345    }
6346
6347    #[test]
6348    fn test_ref_from_mut_from() {
6349        // Test `FromBytes::{ref_from, mut_from}{,_prefix,_suffix}` success cases.
6350        // Exhaustive coverage for these methods is provided by the `Ref` tests
6351        // above, which these helper methods defer to.
6352
6353        let mut buf =
6354            Align::<[u8; 16], AU64>::new([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]);
6355
6356        assert_eq!(
6357            AU64::ref_from_bytes(&buf.t[8..]).unwrap().0.to_ne_bytes(),
6358            [8, 9, 10, 11, 12, 13, 14, 15]
6359        );
6360        let suffix = AU64::mut_from_bytes(&mut buf.t[8..]).unwrap();
6361        suffix.0 = 0x0101010101010101;
6362        // `[u8; 9]` is not half the size of the full buffer, which would catch
6363        // `from_prefix` having the same implementation as `from_suffix` (issues #506, #511).
6364        assert_eq!(
6365            <[u8; 9]>::ref_from_suffix(&buf.t[..]).unwrap(),
6366            (&[0, 1, 2, 3, 4, 5, 6][..], &[7u8, 1, 1, 1, 1, 1, 1, 1, 1])
6367        );
6368        let (prefix, suffix) = AU64::mut_from_suffix(&mut buf.t[1..]).unwrap();
6369        assert_eq!(prefix, &mut [1u8, 2, 3, 4, 5, 6, 7][..]);
6370        suffix.0 = 0x0202020202020202;
6371        let (prefix, suffix) = <[u8; 10]>::mut_from_suffix(&mut buf.t[..]).unwrap();
6372        assert_eq!(prefix, &mut [0u8, 1, 2, 3, 4, 5][..]);
6373        suffix[0] = 42;
6374        assert_eq!(
6375            <[u8; 9]>::ref_from_prefix(&buf.t[..]).unwrap(),
6376            (&[0u8, 1, 2, 3, 4, 5, 42, 7, 2], &[2u8, 2, 2, 2, 2, 2, 2][..])
6377        );
6378        <[u8; 2]>::mut_from_prefix(&mut buf.t[..]).unwrap().0[1] = 30;
6379        assert_eq!(buf.t, [0, 30, 2, 3, 4, 5, 42, 7, 2, 2, 2, 2, 2, 2, 2, 2]);
6380    }
6381
6382    #[test]
6383    fn test_ref_from_mut_from_error() {
6384        // Test `FromBytes::{ref_from, mut_from}{,_prefix,_suffix}` error cases.
6385
6386        // Fail because the buffer is too large.
6387        let mut buf = Align::<[u8; 16], AU64>::default();
6388        // `buf.t` should be aligned to 8, so only the length check should fail.
6389        assert!(AU64::ref_from_bytes(&buf.t[..]).is_err());
6390        assert!(AU64::mut_from_bytes(&mut buf.t[..]).is_err());
6391        assert!(<[u8; 8]>::ref_from_bytes(&buf.t[..]).is_err());
6392        assert!(<[u8; 8]>::mut_from_bytes(&mut buf.t[..]).is_err());
6393
6394        // Fail because the buffer is too small.
6395        let mut buf = Align::<[u8; 4], AU64>::default();
6396        assert!(AU64::ref_from_bytes(&buf.t[..]).is_err());
6397        assert!(AU64::mut_from_bytes(&mut buf.t[..]).is_err());
6398        assert!(<[u8; 8]>::ref_from_bytes(&buf.t[..]).is_err());
6399        assert!(<[u8; 8]>::mut_from_bytes(&mut buf.t[..]).is_err());
6400        assert!(AU64::ref_from_prefix(&buf.t[..]).is_err());
6401        assert!(AU64::mut_from_prefix(&mut buf.t[..]).is_err());
6402        assert!(AU64::ref_from_suffix(&buf.t[..]).is_err());
6403        assert!(AU64::mut_from_suffix(&mut buf.t[..]).is_err());
6404        assert!(<[u8; 8]>::ref_from_prefix(&buf.t[..]).is_err());
6405        assert!(<[u8; 8]>::mut_from_prefix(&mut buf.t[..]).is_err());
6406        assert!(<[u8; 8]>::ref_from_suffix(&buf.t[..]).is_err());
6407        assert!(<[u8; 8]>::mut_from_suffix(&mut buf.t[..]).is_err());
6408
6409        // Fail because the alignment is insufficient.
6410        let mut buf = Align::<[u8; 13], AU64>::default();
6411        assert!(AU64::ref_from_bytes(&buf.t[1..]).is_err());
6412        assert!(AU64::mut_from_bytes(&mut buf.t[1..]).is_err());
6413        assert!(AU64::ref_from_bytes(&buf.t[1..]).is_err());
6414        assert!(AU64::mut_from_bytes(&mut buf.t[1..]).is_err());
6415        assert!(AU64::ref_from_prefix(&buf.t[1..]).is_err());
6416        assert!(AU64::mut_from_prefix(&mut buf.t[1..]).is_err());
6417        assert!(AU64::ref_from_suffix(&buf.t[..]).is_err());
6418        assert!(AU64::mut_from_suffix(&mut buf.t[..]).is_err());
6419    }
6420
6421    #[test]
6422    fn test_to_methods() {
6423        /// Run a series of tests by calling `IntoBytes` methods on `t`.
6424        ///
6425        /// `bytes` is the expected byte sequence returned from `t.as_bytes()`
6426        /// before `t` has been modified. `post_mutation` is the expected value
6427        /// of `t` after `t.as_mut_bytes()[0]` has had its bits flipped (by
6428        /// applying `^= 0xFF`).
6429        ///
6430        /// `N` is the size of `t` in bytes.
6431        fn test<T: FromBytes + IntoBytes + Immutable + Debug + Eq + ?Sized, const N: usize>(
6432            t: &mut T,
6433            bytes: &[u8],
6434            post_mutation: &T,
6435        ) {
6436            // Test that we can access the underlying bytes, and that we get the
6437            // right bytes and the right number of bytes.
6438            assert_eq!(t.as_bytes(), bytes);
6439
6440            // Test that changes to the underlying byte slices are reflected in
6441            // the original object.
6442            t.as_mut_bytes()[0] ^= 0xFF;
6443            assert_eq!(t, post_mutation);
6444            t.as_mut_bytes()[0] ^= 0xFF;
6445
6446            // `write_to` rejects slices that are too small or too large.
6447            assert!(t.write_to(&mut vec![0; N - 1][..]).is_err());
6448            assert!(t.write_to(&mut vec![0; N + 1][..]).is_err());
6449
6450            // `write_to` works as expected.
6451            let mut bytes = [0; N];
6452            assert_eq!(t.write_to(&mut bytes[..]), Ok(()));
6453            assert_eq!(bytes, t.as_bytes());
6454
6455            // `write_to_prefix` rejects slices that are too small.
6456            assert!(t.write_to_prefix(&mut vec![0; N - 1][..]).is_err());
6457
6458            // `write_to_prefix` works with exact-sized slices.
6459            let mut bytes = [0; N];
6460            assert_eq!(t.write_to_prefix(&mut bytes[..]), Ok(()));
6461            assert_eq!(bytes, t.as_bytes());
6462
6463            // `write_to_prefix` works with too-large slices, and any bytes past
6464            // the prefix aren't modified.
6465            let mut too_many_bytes = vec![0; N + 1];
6466            too_many_bytes[N] = 123;
6467            assert_eq!(t.write_to_prefix(&mut too_many_bytes[..]), Ok(()));
6468            assert_eq!(&too_many_bytes[..N], t.as_bytes());
6469            assert_eq!(too_many_bytes[N], 123);
6470
6471            // `write_to_suffix` rejects slices that are too small.
6472            assert!(t.write_to_suffix(&mut vec![0; N - 1][..]).is_err());
6473
6474            // `write_to_suffix` works with exact-sized slices.
6475            let mut bytes = [0; N];
6476            assert_eq!(t.write_to_suffix(&mut bytes[..]), Ok(()));
6477            assert_eq!(bytes, t.as_bytes());
6478
6479            // `write_to_suffix` works with too-large slices, and any bytes
6480            // before the suffix aren't modified.
6481            let mut too_many_bytes = vec![0; N + 1];
6482            too_many_bytes[0] = 123;
6483            assert_eq!(t.write_to_suffix(&mut too_many_bytes[..]), Ok(()));
6484            assert_eq!(&too_many_bytes[1..], t.as_bytes());
6485            assert_eq!(too_many_bytes[0], 123);
6486        }
6487
6488        #[derive(Debug, Eq, PartialEq, FromBytes, IntoBytes, Immutable)]
6489        #[repr(C)]
6490        struct Foo {
6491            a: u32,
6492            b: Wrapping<u32>,
6493            c: Option<NonZeroU32>,
6494        }
6495
6496        let expected_bytes: Vec<u8> = if cfg!(target_endian = "little") {
6497            vec![1, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0]
6498        } else {
6499            vec![0, 0, 0, 1, 0, 0, 0, 2, 0, 0, 0, 0]
6500        };
6501        let post_mutation_expected_a =
6502            if cfg!(target_endian = "little") { 0x00_00_00_FE } else { 0xFF_00_00_01 };
6503        test::<_, 12>(
6504            &mut Foo { a: 1, b: Wrapping(2), c: None },
6505            expected_bytes.as_bytes(),
6506            &Foo { a: post_mutation_expected_a, b: Wrapping(2), c: None },
6507        );
6508        test::<_, 3>(
6509            Unsized::from_mut_slice(&mut [1, 2, 3]),
6510            &[1, 2, 3],
6511            Unsized::from_mut_slice(&mut [0xFE, 2, 3]),
6512        );
6513    }
6514
6515    #[test]
6516    fn test_array() {
6517        #[derive(FromBytes, IntoBytes, Immutable)]
6518        #[repr(C)]
6519        struct Foo {
6520            a: [u16; 33],
6521        }
6522
6523        let foo = Foo { a: [0xFFFF; 33] };
6524        let expected = [0xFFu8; 66];
6525        assert_eq!(foo.as_bytes(), &expected[..]);
6526    }
6527
6528    #[test]
6529    fn test_new_zeroed() {
6530        assert!(!bool::new_zeroed());
6531        assert_eq!(u64::new_zeroed(), 0);
6532        // This test exists in order to exercise unsafe code, especially when
6533        // running under Miri.
6534        #[allow(clippy::unit_cmp)]
6535        {
6536            assert_eq!(<()>::new_zeroed(), ());
6537        }
6538    }
6539
6540    #[test]
6541    fn test_transparent_packed_generic_struct() {
6542        #[derive(IntoBytes, FromBytes, Unaligned)]
6543        #[repr(transparent)]
6544        #[allow(dead_code)] // We never construct this type
6545        struct Foo<T> {
6546            _t: T,
6547            _phantom: PhantomData<()>,
6548        }
6549
6550        assert_impl_all!(Foo<u32>: FromZeros, FromBytes, IntoBytes);
6551        assert_impl_all!(Foo<u8>: Unaligned);
6552
6553        #[derive(IntoBytes, FromBytes, Unaligned)]
6554        #[repr(C, packed)]
6555        #[allow(dead_code)] // We never construct this type
6556        struct Bar<T, U> {
6557            _t: T,
6558            _u: U,
6559        }
6560
6561        assert_impl_all!(Bar<u8, AU64>: FromZeros, FromBytes, IntoBytes, Unaligned);
6562    }
6563
6564    #[cfg(feature = "alloc")]
6565    mod alloc {
6566        use super::*;
6567
6568        #[cfg(zerocopy_panic_in_const_and_vec_try_reserve_1_57_0)]
6569        #[test]
6570        fn test_extend_vec_zeroed() {
6571            // Test extending when there is an existing allocation.
6572            let mut v = vec![100u16, 200, 300];
6573            FromZeros::extend_vec_zeroed(&mut v, 3).unwrap();
6574            assert_eq!(v.len(), 6);
6575            assert_eq!(&*v, &[100, 200, 300, 0, 0, 0]);
6576            drop(v);
6577
6578            // Test extending when there is no existing allocation.
6579            let mut v: Vec<u64> = Vec::new();
6580            FromZeros::extend_vec_zeroed(&mut v, 3).unwrap();
6581            assert_eq!(v.len(), 3);
6582            assert_eq!(&*v, &[0, 0, 0]);
6583            drop(v);
6584        }
6585
6586        #[cfg(zerocopy_panic_in_const_and_vec_try_reserve_1_57_0)]
6587        #[test]
6588        fn test_extend_vec_zeroed_zst() {
6589            // Test extending when there is an existing (fake) allocation.
6590            let mut v = vec![(), (), ()];
6591            <()>::extend_vec_zeroed(&mut v, 3).unwrap();
6592            assert_eq!(v.len(), 6);
6593            assert_eq!(&*v, &[(), (), (), (), (), ()]);
6594            drop(v);
6595
6596            // Test extending when there is no existing (fake) allocation.
6597            let mut v: Vec<()> = Vec::new();
6598            <()>::extend_vec_zeroed(&mut v, 3).unwrap();
6599            assert_eq!(&*v, &[(), (), ()]);
6600            drop(v);
6601        }
6602
6603        #[cfg(zerocopy_panic_in_const_and_vec_try_reserve_1_57_0)]
6604        #[test]
6605        fn test_insert_vec_zeroed() {
6606            // Insert at start (no existing allocation).
6607            let mut v: Vec<u64> = Vec::new();
6608            u64::insert_vec_zeroed(&mut v, 0, 2).unwrap();
6609            assert_eq!(v.len(), 2);
6610            assert_eq!(&*v, &[0, 0]);
6611            drop(v);
6612
6613            // Insert at start.
6614            let mut v = vec![100u64, 200, 300];
6615            u64::insert_vec_zeroed(&mut v, 0, 2).unwrap();
6616            assert_eq!(v.len(), 5);
6617            assert_eq!(&*v, &[0, 0, 100, 200, 300]);
6618            drop(v);
6619
6620            // Insert at middle.
6621            let mut v = vec![100u64, 200, 300];
6622            u64::insert_vec_zeroed(&mut v, 1, 1).unwrap();
6623            assert_eq!(v.len(), 4);
6624            assert_eq!(&*v, &[100, 0, 200, 300]);
6625            drop(v);
6626
6627            // Insert at end.
6628            let mut v = vec![100u64, 200, 300];
6629            u64::insert_vec_zeroed(&mut v, 3, 1).unwrap();
6630            assert_eq!(v.len(), 4);
6631            assert_eq!(&*v, &[100, 200, 300, 0]);
6632            drop(v);
6633        }
6634
6635        #[cfg(zerocopy_panic_in_const_and_vec_try_reserve_1_57_0)]
6636        #[test]
6637        fn test_insert_vec_zeroed_zst() {
6638            // Insert at start (no existing fake allocation).
6639            let mut v: Vec<()> = Vec::new();
6640            <()>::insert_vec_zeroed(&mut v, 0, 2).unwrap();
6641            assert_eq!(v.len(), 2);
6642            assert_eq!(&*v, &[(), ()]);
6643            drop(v);
6644
6645            // Insert at start.
6646            let mut v = vec![(), (), ()];
6647            <()>::insert_vec_zeroed(&mut v, 0, 2).unwrap();
6648            assert_eq!(v.len(), 5);
6649            assert_eq!(&*v, &[(), (), (), (), ()]);
6650            drop(v);
6651
6652            // Insert at middle.
6653            let mut v = vec![(), (), ()];
6654            <()>::insert_vec_zeroed(&mut v, 1, 1).unwrap();
6655            assert_eq!(v.len(), 4);
6656            assert_eq!(&*v, &[(), (), (), ()]);
6657            drop(v);
6658
6659            // Insert at end.
6660            let mut v = vec![(), (), ()];
6661            <()>::insert_vec_zeroed(&mut v, 3, 1).unwrap();
6662            assert_eq!(v.len(), 4);
6663            assert_eq!(&*v, &[(), (), (), ()]);
6664            drop(v);
6665        }
6666
6667        #[test]
6668        fn test_new_box_zeroed() {
6669            assert_eq!(u64::new_box_zeroed(), Ok(Box::new(0)));
6670        }
6671
6672        #[test]
6673        fn test_new_box_zeroed_array() {
6674            drop(<[u32; 0x1000]>::new_box_zeroed());
6675        }
6676
6677        #[test]
6678        fn test_new_box_zeroed_zst() {
6679            // This test exists in order to exercise unsafe code, especially
6680            // when running under Miri.
6681            #[allow(clippy::unit_cmp)]
6682            {
6683                assert_eq!(<()>::new_box_zeroed(), Ok(Box::new(())));
6684            }
6685        }
6686
6687        #[test]
6688        fn test_new_box_zeroed_with_elems() {
6689            let mut s: Box<[u64]> = <[u64]>::new_box_zeroed_with_elems(3).unwrap();
6690            assert_eq!(s.len(), 3);
6691            assert_eq!(&*s, &[0, 0, 0]);
6692            s[1] = 3;
6693            assert_eq!(&*s, &[0, 3, 0]);
6694        }
6695
6696        #[test]
6697        fn test_new_box_zeroed_with_elems_empty() {
6698            let s: Box<[u64]> = <[u64]>::new_box_zeroed_with_elems(0).unwrap();
6699            assert_eq!(s.len(), 0);
6700        }
6701
6702        #[test]
6703        fn test_new_box_zeroed_with_elems_zst() {
6704            let mut s: Box<[()]> = <[()]>::new_box_zeroed_with_elems(3).unwrap();
6705            assert_eq!(s.len(), 3);
6706            assert!(s.get(10).is_none());
6707            // This test exists in order to exercise unsafe code, especially
6708            // when running under Miri.
6709            #[allow(clippy::unit_cmp)]
6710            {
6711                assert_eq!(s[1], ());
6712            }
6713            s[2] = ();
6714        }
6715
6716        #[test]
6717        fn test_new_box_zeroed_with_elems_zst_empty() {
6718            let s: Box<[()]> = <[()]>::new_box_zeroed_with_elems(0).unwrap();
6719            assert_eq!(s.len(), 0);
6720        }
6721
6722        #[test]
6723        fn new_box_zeroed_with_elems_errors() {
6724            assert_eq!(<[u16]>::new_box_zeroed_with_elems(usize::MAX), Err(AllocError));
6725
6726            let max = <usize as core::convert::TryFrom<_>>::try_from(isize::MAX).unwrap();
6727            assert_eq!(
6728                <[u16]>::new_box_zeroed_with_elems((max / mem::size_of::<u16>()) + 1),
6729                Err(AllocError)
6730            );
6731        }
6732    }
6733}