// zerocopy/src/lib.rs

1// Copyright 2018 The Fuchsia Authors
2//
3// Licensed under the 2-Clause BSD License <LICENSE-BSD or
4// https://opensource.org/license/bsd-2-clause>, Apache License, Version 2.0
5// <LICENSE-APACHE or https://www.apache.org/licenses/LICENSE-2.0>, or the MIT
6// license <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your option.
7// This file may not be copied, modified, or distributed except according to
8// those terms.
9
10// After updating the following doc comment, make sure to run the following
11// command to update `README.md` based on its contents:
12//
13//   cargo -q run --manifest-path tools/Cargo.toml -p generate-readme > README.md
14
15//! ***<span style="font-size: 140%">Fast, safe, <span
16//! style="color:red;">compile error</span>. Pick two.</span>***
17//!
18//! Zerocopy makes zero-cost memory manipulation effortless. We write `unsafe`
19//! so you don't have to.
20//!
21//! *For an overview of what's changed from zerocopy 0.7, check out our [release
22//! notes][release-notes], which include a step-by-step upgrading guide.*
23//!
24//! *Have questions? Need more out of zerocopy? Submit a [customer request
25//! issue][customer-request-issue] or ask the maintainers on
26//! [GitHub][github-q-a] or [Discord][discord]!*
27//!
28//! [customer-request-issue]: https://github.com/google/zerocopy/issues/new/choose
29//! [release-notes]: https://github.com/google/zerocopy/discussions/1680
30//! [github-q-a]: https://github.com/google/zerocopy/discussions/categories/q-a
31//! [discord]: https://discord.gg/MAvWH2R6zk
32//!
33//! # Overview
34//!
35//! ##### Conversion Traits
36//!
37//! Zerocopy provides four derivable traits for zero-cost conversions:
38//! - [`TryFromBytes`] indicates that a type may safely be converted from
39//!   certain byte sequences (conditional on runtime checks)
40//! - [`FromZeros`] indicates that a sequence of zero bytes represents a valid
41//!   instance of a type
42//! - [`FromBytes`] indicates that a type may safely be converted from an
43//!   arbitrary byte sequence
44//! - [`IntoBytes`] indicates that a type may safely be converted *to* a byte
45//!   sequence
46//!
47//! These traits support sized types, slices, and [slice DSTs][slice-dsts].
48//!
49//! [slice-dsts]: KnownLayout#dynamically-sized-types
50//!
51//! ##### Marker Traits
52//!
53//! Zerocopy provides three derivable marker traits that do not provide any
54//! functionality themselves, but are required to call certain methods provided
55//! by the conversion traits:
56//! - [`KnownLayout`] indicates that zerocopy can reason about certain layout
57//!   qualities of a type
58//! - [`Immutable`] indicates that a type is free from interior mutability,
59//!   except by ownership or an exclusive (`&mut`) borrow
60//! - [`Unaligned`] indicates that a type's alignment requirement is 1
61//!
62//! You should generally derive these marker traits whenever possible.
63//!
64//! ##### Conversion Macros
65//!
66//! Zerocopy provides six macros for safe casting between types:
67//!
68//! - ([`try_`][try_transmute])[`transmute`] (conditionally) converts a value of
69//!   one type to a value of another type of the same size
70//! - ([`try_`][try_transmute_mut])[`transmute_mut`] (conditionally) converts a
71//!   mutable reference of one type to a mutable reference of another type of
72//!   the same size
73//! - ([`try_`][try_transmute_ref])[`transmute_ref`] (conditionally) converts a
74//!   mutable or immutable reference of one type to an immutable reference of
75//!   another type of the same size
76//!
//! 77//! These macros perform *compile-time* size and alignment checks, meaning that
//! 78//! unconditional casts have zero cost at runtime. Conditional casts do not need
//! 79//! to validate size or alignment at runtime, but do need to validate contents.
80//!
81//! These macros cannot be used in generic contexts. For generic conversions,
82//! use the methods defined by the [conversion traits](#conversion-traits).
83//!
84//! ##### Byteorder-Aware Numerics
85//!
86//! Zerocopy provides byte-order aware integer types that support these
87//! conversions; see the [`byteorder`] module. These types are especially useful
88//! for network parsing.
89//!
90//! # Cargo Features
91//!
92//! - **`alloc`**
93//!   By default, `zerocopy` is `no_std`. When the `alloc` feature is enabled,
94//!   the `alloc` crate is added as a dependency, and some allocation-related
95//!   functionality is added.
96//!
97//! - **`std`**
98//!   By default, `zerocopy` is `no_std`. When the `std` feature is enabled, the
//! 99//!   `std` crate is added as a dependency (i.e., `no_std` is disabled), and
100//!   support for some `std` types is added. `std` implies `alloc`.
101//!
102//! - **`derive`**
103//!   Provides derives for the core marker traits via the `zerocopy-derive`
104//!   crate. These derives are re-exported from `zerocopy`, so it is not
105//!   necessary to depend on `zerocopy-derive` directly.
106//!
107//!   However, you may experience better compile times if you instead directly
108//!   depend on both `zerocopy` and `zerocopy-derive` in your `Cargo.toml`,
109//!   since doing so will allow Rust to compile these crates in parallel. To do
110//!   so, do *not* enable the `derive` feature, and list both dependencies in
111//!   your `Cargo.toml` with the same leading non-zero version number; e.g:
112//!
113//!   ```toml
114//!   [dependencies]
115//!   zerocopy = "0.X"
116//!   zerocopy-derive = "0.X"
117//!   ```
118//!
119//!   To avoid the risk of [duplicate import errors][duplicate-import-errors] if
120//!   one of your dependencies enables zerocopy's `derive` feature, import
121//!   derives as `use zerocopy_derive::*` rather than by name (e.g., `use
122//!   zerocopy_derive::FromBytes`).
123//!
124//! - **`simd`**
125//!   When the `simd` feature is enabled, `FromZeros`, `FromBytes`, and
126//!   `IntoBytes` impls are emitted for all stable SIMD types which exist on the
127//!   target platform. Note that the layout of SIMD types is not yet stabilized,
128//!   so these impls may be removed in the future if layout changes make them
129//!   invalid. For more information, see the Unsafe Code Guidelines Reference
130//!   page on the [layout of packed SIMD vectors][simd-layout].
131//!
132//! - **`simd-nightly`**
133//!   Enables the `simd` feature and adds support for SIMD types which are only
134//!   available on nightly. Since these types are unstable, support for any type
135//!   may be removed at any point in the future.
136//!
137//! - **`float-nightly`**
138//!   Adds support for the unstable `f16` and `f128` types. These types are
139//!   not yet fully implemented and may not be supported on all platforms.
140//!
141//! [duplicate-import-errors]: https://github.com/google/zerocopy/issues/1587
142//! [simd-layout]: https://rust-lang.github.io/unsafe-code-guidelines/layout/packed-simd-vectors.html
143//!
144//! # Security Ethos
145//!
146//! Zerocopy is expressly designed for use in security-critical contexts. We
//! 147//! strive to ensure that zerocopy code is sound under Rust's current
148//! memory model, and *any future memory model*. We ensure this by:
149//! - **...not 'guessing' about Rust's semantics.**
150//!   We annotate `unsafe` code with a precise rationale for its soundness that
151//!   cites a relevant section of Rust's official documentation. When Rust's
152//!   documented semantics are unclear, we work with the Rust Operational
153//!   Semantics Team to clarify Rust's documentation.
154//! - **...rigorously testing our implementation.**
155//!   We run tests using [Miri], ensuring that zerocopy is sound across a wide
156//!   array of supported target platforms of varying endianness and pointer
157//!   width, and across both current and experimental memory models of Rust.
158//! - **...formally proving the correctness of our implementation.**
159//!   We apply formal verification tools like [Kani][kani] to prove zerocopy's
160//!   correctness.
161//!
162//! For more information, see our full [soundness policy].
163//!
164//! [Miri]: https://github.com/rust-lang/miri
165//! [Kani]: https://github.com/model-checking/kani
166//! [soundness policy]: https://github.com/google/zerocopy/blob/main/POLICIES.md#soundness
167//!
168//! # Relationship to Project Safe Transmute
169//!
170//! [Project Safe Transmute] is an official initiative of the Rust Project to
171//! develop language-level support for safer transmutation. The Project consults
172//! with crates like zerocopy to identify aspects of safer transmutation that
173//! would benefit from compiler support, and has developed an [experimental,
174//! compiler-supported analysis][mcp-transmutability] which determines whether,
175//! for a given type, any value of that type may be soundly transmuted into
176//! another type. Once this functionality is sufficiently mature, zerocopy
177//! intends to replace its internal transmutability analysis (implemented by our
178//! custom derives) with the compiler-supported one. This change will likely be
179//! an implementation detail that is invisible to zerocopy's users.
180//!
181//! Project Safe Transmute will not replace the need for most of zerocopy's
182//! higher-level abstractions. The experimental compiler analysis is a tool for
183//! checking the soundness of `unsafe` code, not a tool to avoid writing
184//! `unsafe` code altogether. For the foreseeable future, crates like zerocopy
185//! will still be required in order to provide higher-level abstractions on top
186//! of the building block provided by Project Safe Transmute.
187//!
188//! [Project Safe Transmute]: https://rust-lang.github.io/rfcs/2835-project-safe-transmute.html
189//! [mcp-transmutability]: https://github.com/rust-lang/compiler-team/issues/411
190//!
191//! # MSRV
192//!
193//! See our [MSRV policy].
194//!
195//! [MSRV policy]: https://github.com/google/zerocopy/blob/main/POLICIES.md#msrv
196//!
197//! # Changelog
198//!
199//! Zerocopy uses [GitHub Releases].
200//!
201//! [GitHub Releases]: https://github.com/google/zerocopy/releases
202//!
203//! # Thanks
204//!
205//! Zerocopy is maintained by engineers at Google with help from [many wonderful
206//! contributors][contributors]. Thank you to everyone who has lent a hand in
207//! making Rust a little more secure!
208//!
209//! [contributors]: https://github.com/google/zerocopy/graphs/contributors
210
211// Sometimes we want to use lints which were added after our MSRV.
212// `unknown_lints` is `warn` by default and we deny warnings in CI, so without
213// this attribute, any unknown lint would cause a CI failure when testing with
214// our MSRV.
215#![allow(unknown_lints, non_local_definitions, unreachable_patterns)]
216#![deny(renamed_and_removed_lints)]
217#![deny(
218    anonymous_parameters,
219    deprecated_in_future,
220    late_bound_lifetime_arguments,
221    missing_copy_implementations,
222    missing_debug_implementations,
223    missing_docs,
224    path_statements,
225    patterns_in_fns_without_body,
226    rust_2018_idioms,
227    trivial_numeric_casts,
228    unreachable_pub,
229    unsafe_op_in_unsafe_fn,
230    unused_extern_crates,
231    // We intentionally choose not to deny `unused_qualifications`. When items
232    // are added to the prelude (e.g., `core::mem::size_of`), this has the
233    // consequence of making some uses trigger this lint on the latest toolchain
234    // (e.g., `mem::size_of`), but fixing it (e.g. by replacing with `size_of`)
235    // does not work on older toolchains.
236    //
237    // We tested a more complicated fix in #1413, but ultimately decided that,
238    // since this lint is just a minor style lint, the complexity isn't worth it
239    // - it's fine to occasionally have unused qualifications slip through,
240    // especially since these do not affect our user-facing API in any way.
241    variant_size_differences
242)]
243#![cfg_attr(
244    __ZEROCOPY_INTERNAL_USE_ONLY_NIGHTLY_FEATURES_IN_TESTS,
245    deny(fuzzy_provenance_casts, lossy_provenance_casts)
246)]
247#![deny(
248    clippy::all,
249    clippy::alloc_instead_of_core,
250    clippy::arithmetic_side_effects,
251    clippy::as_underscore,
252    clippy::assertions_on_result_states,
253    clippy::as_conversions,
254    clippy::correctness,
255    clippy::dbg_macro,
256    clippy::decimal_literal_representation,
257    clippy::double_must_use,
258    clippy::get_unwrap,
259    clippy::indexing_slicing,
260    clippy::missing_inline_in_public_items,
261    clippy::missing_safety_doc,
262    clippy::multiple_unsafe_ops_per_block,
263    clippy::must_use_candidate,
264    clippy::must_use_unit,
265    clippy::obfuscated_if_else,
266    clippy::perf,
267    clippy::print_stdout,
268    clippy::return_self_not_must_use,
269    clippy::std_instead_of_core,
270    clippy::style,
271    clippy::suspicious,
272    clippy::todo,
273    clippy::undocumented_unsafe_blocks,
274    clippy::unimplemented,
275    clippy::unnested_or_patterns,
276    clippy::unwrap_used,
277    clippy::use_debug
278)]
279// `clippy::incompatible_msrv` (implied by `clippy::suspicious`): This sometimes
280// has false positives, and we test on our MSRV in CI, so it doesn't help us
281// anyway.
282#![allow(clippy::needless_lifetimes, clippy::type_complexity, clippy::incompatible_msrv)]
283#![deny(
284    rustdoc::bare_urls,
285    rustdoc::broken_intra_doc_links,
286    rustdoc::invalid_codeblock_attributes,
287    rustdoc::invalid_html_tags,
288    rustdoc::invalid_rust_codeblocks,
289    rustdoc::missing_crate_level_docs,
290    rustdoc::private_intra_doc_links
291)]
292// In test code, it makes sense to weight more heavily towards concise, readable
293// code over correct or debuggable code.
294#![cfg_attr(any(test, kani), allow(
295    // In tests, you get line numbers and have access to source code, so panic
296    // messages are less important. You also often unwrap a lot, which would
297    // make expect'ing instead very verbose.
298    clippy::unwrap_used,
299    // In tests, there's no harm to "panic risks" - the worst that can happen is
300    // that your test will fail, and you'll fix it. By contrast, panic risks in
301    // production code introduce the possibly of code panicking unexpectedly "in
302    // the field".
303    clippy::arithmetic_side_effects,
304    clippy::indexing_slicing,
305))]
306#![cfg_attr(not(any(test, kani, feature = "std")), no_std)]
307#![cfg_attr(
308    all(feature = "simd-nightly", target_arch = "arm"),
309    feature(stdarch_arm_neon_intrinsics)
310)]
311#![cfg_attr(
312    all(feature = "simd-nightly", any(target_arch = "powerpc", target_arch = "powerpc64")),
313    feature(stdarch_powerpc)
314)]
315#![cfg_attr(feature = "float-nightly", feature(f16, f128))]
316#![cfg_attr(doc_cfg, feature(doc_cfg))]
317#![cfg_attr(__ZEROCOPY_INTERNAL_USE_ONLY_NIGHTLY_FEATURES_IN_TESTS, feature(coverage_attribute))]
318#![cfg_attr(
319    any(__ZEROCOPY_INTERNAL_USE_ONLY_NIGHTLY_FEATURES_IN_TESTS, miri),
320    feature(layout_for_ptr)
321)]
322#![cfg_attr(all(test, __ZEROCOPY_INTERNAL_USE_ONLY_NIGHTLY_FEATURES_IN_TESTS), feature(test))]
323
324// This is a hack to allow zerocopy-derive derives to work in this crate. They
325// assume that zerocopy is linked as an extern crate, so they access items from
326// it as `zerocopy::Xxx`. This makes that still work.
327#[cfg(any(feature = "derive", test))]
328extern crate self as zerocopy;
329
330#[cfg(all(test, __ZEROCOPY_INTERNAL_USE_ONLY_NIGHTLY_FEATURES_IN_TESTS))]
331extern crate test;
332
333#[doc(hidden)]
334#[macro_use]
335pub mod util;
336
337pub mod byte_slice;
338pub mod byteorder;
339mod deprecated;
340
341#[cfg(__ZEROCOPY_INTERNAL_USE_ONLY_DEV_MODE)]
342pub mod doctests;
343
344// This module is `pub` so that zerocopy's error types and error handling
345// documentation is grouped together in a cohesive module. In practice, we
346// expect most users to use the re-export of `error`'s items to avoid identifier
347// stuttering.
348pub mod error;
349mod impls;
350#[doc(hidden)]
351pub mod layout;
352mod macros;
353#[doc(hidden)]
354pub mod pointer;
355mod r#ref;
356mod split_at;
357// FIXME(#252): If we make this pub, come up with a better name.
358mod wrappers;
359
360use core::{
361    cell::{Cell, UnsafeCell},
362    cmp::Ordering,
363    fmt::{self, Debug, Display, Formatter},
364    hash::Hasher,
365    marker::PhantomData,
366    mem::{self, ManuallyDrop, MaybeUninit as CoreMaybeUninit},
367    num::{
368        NonZeroI128, NonZeroI16, NonZeroI32, NonZeroI64, NonZeroI8, NonZeroIsize, NonZeroU128,
369        NonZeroU16, NonZeroU32, NonZeroU64, NonZeroU8, NonZeroUsize, Wrapping,
370    },
371    ops::{Deref, DerefMut},
372    ptr::{self, NonNull},
373    slice,
374};
375#[cfg(feature = "std")]
376use std::io;
377
378#[doc(hidden)]
379pub use crate::pointer::invariant::{self, BecauseExclusive};
380#[doc(hidden)]
381pub use crate::pointer::PtrInner;
382pub use crate::{
383    byte_slice::*,
384    byteorder::*,
385    error::*,
386    r#ref::*,
387    split_at::{Split, SplitAt},
388    wrappers::*,
389};
390
391#[cfg(any(feature = "alloc", test, kani))]
392extern crate alloc;
393#[cfg(any(feature = "alloc", test))]
394use alloc::{boxed::Box, vec::Vec};
395#[cfg(any(feature = "alloc", test))]
396use core::alloc::Layout;
397
398use util::MetadataOf;
399
400// Used by `KnownLayout`.
401#[doc(hidden)]
402pub use crate::layout::*;
403// Used by `TryFromBytes::is_bit_valid`.
404#[doc(hidden)]
405pub use crate::pointer::{invariant::BecauseImmutable, Maybe, Ptr};
406// For each trait polyfill, as soon as the corresponding feature is stable, the
407// polyfill import will be unused because method/function resolution will prefer
408// the inherent method/function over a trait method/function. Thus, we suppress
409// the `unused_imports` warning.
410//
411// See the documentation on `util::polyfills` for more information.
412#[allow(unused_imports)]
413use crate::util::polyfills::{self, NonNullExt as _, NumExt as _};
414
// Emit a compile-time warning when zerocopy's tests are built without the
// dev-mode cfg set (presumably set by the `cargo.sh`/`win-cargo.bat` wrappers
// named in the message below — TODO confirm). Rust has no direct way to emit a
// custom compile-time warning, so this abuses the `deprecated` lint: the
// anonymous const's body references a deprecated item, and the lint's message
// carries the instructions for the developer.
#[cfg(all(test, not(__ZEROCOPY_INTERNAL_USE_ONLY_DEV_MODE)))]
const _: () = {
    // The deprecation message doubles as the warning text shown to the user.
    #[deprecated = "Development of zerocopy using cargo is not supported. Please use `cargo.sh` or `win-cargo.bat` instead."]
    #[allow(unused)]
    const WARNING: () = ();
    // Force the `deprecated` lint to at least `warn` at this use site so the
    // message fires even if the lint is allowed elsewhere; evaluating
    // `WARNING` here is the "use" of the deprecated item that triggers it.
    #[warn(deprecated)]
    WARNING
};
423
424/// Implements [`KnownLayout`].
425///
426/// This derive analyzes various aspects of a type's layout that are needed for
427/// some of zerocopy's APIs. It can be applied to structs, enums, and unions;
428/// e.g.:
429///
430/// ```
431/// # use zerocopy_derive::KnownLayout;
432/// #[derive(KnownLayout)]
433/// struct MyStruct {
434/// # /*
435///     ...
436/// # */
437/// }
438///
439/// #[derive(KnownLayout)]
440/// enum MyEnum {
441/// #   V00,
442/// # /*
443///     ...
444/// # */
445/// }
446///
447/// #[derive(KnownLayout)]
448/// union MyUnion {
449/// #   variant: u8,
450/// # /*
451///     ...
452/// # */
453/// }
454/// ```
455///
456/// # Limitations
457///
458/// This derive cannot currently be applied to unsized structs without an
459/// explicit `repr` attribute.
460///
461/// Some invocations of this derive run afoul of a [known bug] in Rust's type
462/// privacy checker. For example, this code:
463///
464/// ```compile_fail,E0446
465/// use zerocopy::*;
466/// # use zerocopy_derive::*;
467///
468/// #[derive(KnownLayout)]
469/// #[repr(C)]
470/// pub struct PublicType {
471///     leading: Foo,
472///     trailing: Bar,
473/// }
474///
475/// #[derive(KnownLayout)]
476/// struct Foo;
477///
478/// #[derive(KnownLayout)]
479/// struct Bar;
480/// ```
481///
482/// ...results in a compilation error:
483///
484/// ```text
485/// error[E0446]: private type `Bar` in public interface
486///  --> examples/bug.rs:3:10
487///    |
488/// 3  | #[derive(KnownLayout)]
489///    |          ^^^^^^^^^^^ can't leak private type
490/// ...
491/// 14 | struct Bar;
492///    | ---------- `Bar` declared as private
493///    |
494///    = note: this error originates in the derive macro `KnownLayout` (in Nightly builds, run with -Z macro-backtrace for more info)
495/// ```
496///
497/// This issue arises when `#[derive(KnownLayout)]` is applied to `repr(C)`
498/// structs whose trailing field type is less public than the enclosing struct.
499///
500/// To work around this, mark the trailing field type `pub` and annotate it with
501/// `#[doc(hidden)]`; e.g.:
502///
503/// ```no_run
504/// use zerocopy::*;
505/// # use zerocopy_derive::*;
506///
507/// #[derive(KnownLayout)]
508/// #[repr(C)]
509/// pub struct PublicType {
510///     leading: Foo,
511///     trailing: Bar,
512/// }
513///
514/// #[derive(KnownLayout)]
515/// struct Foo;
516///
517/// #[doc(hidden)]
518/// #[derive(KnownLayout)]
519/// pub struct Bar; // <- `Bar` is now also `pub`
520/// ```
521///
522/// [known bug]: https://github.com/rust-lang/rust/issues/45713
523#[cfg(any(feature = "derive", test))]
524#[cfg_attr(doc_cfg, doc(cfg(feature = "derive")))]
525pub use zerocopy_derive::KnownLayout;
526// These exist so that code which was written against the old names will get
527// less confusing error messages when they upgrade to a more recent version of
528// zerocopy. On our MSRV toolchain, the error messages read, for example:
529//
530//   error[E0603]: trait `FromZeroes` is private
531//       --> examples/deprecated.rs:1:15
532//        |
533//   1    | use zerocopy::FromZeroes;
534//        |               ^^^^^^^^^^ private trait
535//        |
536//   note: the trait `FromZeroes` is defined here
537//       --> /Users/josh/workspace/zerocopy/src/lib.rs:1845:5
538//        |
539//   1845 | use FromZeros as FromZeroes;
540//        |     ^^^^^^^^^^^^^^^^^^^^^^^
541//
542// The "note" provides enough context to make it easy to figure out how to fix
543// the error.
544#[allow(unused)]
545use {FromZeros as FromZeroes, IntoBytes as AsBytes, Ref as LayoutVerified};
546
547/// Indicates that zerocopy can reason about certain aspects of a type's layout.
548///
549/// This trait is required by many of zerocopy's APIs. It supports sized types,
550/// slices, and [slice DSTs](#dynamically-sized-types).
551///
552/// # Implementation
553///
554/// **Do not implement this trait yourself!** Instead, use
555/// [`#[derive(KnownLayout)]`][derive]; e.g.:
556///
557/// ```
558/// # use zerocopy_derive::KnownLayout;
559/// #[derive(KnownLayout)]
560/// struct MyStruct {
561/// # /*
562///     ...
563/// # */
564/// }
565///
566/// #[derive(KnownLayout)]
567/// enum MyEnum {
568/// # /*
569///     ...
570/// # */
571/// }
572///
573/// #[derive(KnownLayout)]
574/// union MyUnion {
575/// #   variant: u8,
576/// # /*
577///     ...
578/// # */
579/// }
580/// ```
581///
582/// This derive performs a sophisticated analysis to deduce the layout
583/// characteristics of types. You **must** implement this trait via the derive.
584///
585/// # Dynamically-sized types
586///
587/// `KnownLayout` supports slice-based dynamically sized types ("slice DSTs").
588///
589/// A slice DST is a type whose trailing field is either a slice or another
590/// slice DST, rather than a type with fixed size. For example:
591///
592/// ```
593/// #[repr(C)]
594/// struct PacketHeader {
595/// # /*
596///     ...
597/// # */
598/// }
599///
600/// #[repr(C)]
601/// struct Packet {
602///     header: PacketHeader,
603///     body: [u8],
604/// }
605/// ```
606///
607/// It can be useful to think of slice DSTs as a generalization of slices - in
608/// other words, a normal slice is just the special case of a slice DST with
609/// zero leading fields. In particular:
610/// - Like slices, slice DSTs can have different lengths at runtime
611/// - Like slices, slice DSTs cannot be passed by-value, but only by reference
612///   or via other indirection such as `Box`
613/// - Like slices, a reference (or `Box`, or other pointer type) to a slice DST
614///   encodes the number of elements in the trailing slice field
615///
616/// ## Slice DST layout
617///
618/// Just like other composite Rust types, the layout of a slice DST is not
619/// well-defined unless it is specified using an explicit `#[repr(...)]`
620/// attribute such as `#[repr(C)]`. [Other representations are
621/// supported][reprs], but in this section, we'll use `#[repr(C)]` as our
622/// example.
623///
624/// A `#[repr(C)]` slice DST is laid out [just like sized `#[repr(C)]`
625/// types][repr-c-structs], but the presence of a variable-length field
626/// introduces the possibility of *dynamic padding*. In particular, it may be
627/// necessary to add trailing padding *after* the trailing slice field in order
628/// to satisfy the outer type's alignment, and the amount of padding required
629/// may be a function of the length of the trailing slice field. This is just a
630/// natural consequence of the normal `#[repr(C)]` rules applied to slice DSTs,
631/// but it can result in surprising behavior. For example, consider the
632/// following type:
633///
634/// ```
635/// #[repr(C)]
636/// struct Foo {
637///     a: u32,
638///     b: u8,
639///     z: [u16],
640/// }
641/// ```
642///
643/// Assuming that `u32` has alignment 4 (this is not true on all platforms),
644/// then `Foo` has alignment 4 as well. Here is the smallest possible value for
645/// `Foo`:
646///
647/// ```text
648/// byte offset | 01234567
649///       field | aaaab---
650///                    ><
651/// ```
652///
653/// In this value, `z` has length 0. Abiding by `#[repr(C)]`, the lowest offset
654/// that we can place `z` at is 5, but since `z` has alignment 2, we need to
655/// round up to offset 6. This means that there is one byte of padding between
656/// `b` and `z`, then 0 bytes of `z` itself (denoted `><` in this diagram), and
657/// then two bytes of padding after `z` in order to satisfy the overall
658/// alignment of `Foo`. The size of this instance is 8 bytes.
659///
660/// What about if `z` has length 1?
661///
662/// ```text
663/// byte offset | 01234567
664///       field | aaaab-zz
665/// ```
666///
667/// In this instance, `z` has length 1, and thus takes up 2 bytes. That means
668/// that we no longer need padding after `z` in order to satisfy `Foo`'s
669/// alignment. We've now seen two different values of `Foo` with two different
670/// lengths of `z`, but they both have the same size - 8 bytes.
671///
672/// What about if `z` has length 2?
673///
674/// ```text
675/// byte offset | 012345678901
676///       field | aaaab-zzzz--
677/// ```
678///
679/// Now `z` has length 2, and thus takes up 4 bytes. This brings our un-padded
680/// size to 10, and so we now need another 2 bytes of padding after `z` to
681/// satisfy `Foo`'s alignment.
682///
683/// Again, all of this is just a logical consequence of the `#[repr(C)]` rules
684/// applied to slice DSTs, but it can be surprising that the amount of trailing
685/// padding becomes a function of the trailing slice field's length, and thus
686/// can only be computed at runtime.
687///
688/// [reprs]: https://doc.rust-lang.org/reference/type-layout.html#representations
689/// [repr-c-structs]: https://doc.rust-lang.org/reference/type-layout.html#reprc-structs
690///
691/// ## What is a valid size?
692///
693/// There are two places in zerocopy's API that we refer to "a valid size" of a
694/// type. In normal casts or conversions, where the source is a byte slice, we
695/// need to know whether the source byte slice is a valid size of the
696/// destination type. In prefix or suffix casts, we need to know whether *there
697/// exists* a valid size of the destination type which fits in the source byte
698/// slice and, if so, what the largest such size is.
699///
700/// As outlined above, a slice DST's size is defined by the number of elements
701/// in its trailing slice field. However, there is not necessarily a 1-to-1
702/// mapping between trailing slice field length and overall size. As we saw in
703/// the previous section with the type `Foo`, instances with both 0 and 1
704/// elements in the trailing `z` field result in a `Foo` whose size is 8 bytes.
705///
706/// When we say "x is a valid size of `T`", we mean one of two things:
707/// - If `T: Sized`, then we mean that `x == size_of::<T>()`
708/// - If `T` is a slice DST, then we mean that there exists a `len` such that the instance of
709///   `T` with `len` trailing slice elements has size `x`
710///
711/// When we say "largest possible size of `T` that fits in a byte slice", we
712/// mean one of two things:
713/// - If `T: Sized`, then we mean `size_of::<T>()` if the byte slice is at least
714///   `size_of::<T>()` bytes long
715/// - If `T` is a slice DST, then we mean to consider all values, `len`, such
716///   that the instance of `T` with `len` trailing slice elements fits in the
717///   byte slice, and to choose the largest such `len`, if any
718///
719///
720/// # Safety
721///
722/// This trait does not convey any safety guarantees to code outside this crate.
723///
724/// You must not rely on the `#[doc(hidden)]` internals of `KnownLayout`. Future
725/// releases of zerocopy may make backwards-breaking changes to these items,
726/// including changes that only affect soundness, which may cause code which
727/// uses those items to silently become unsound.
728///
729#[cfg_attr(feature = "derive", doc = "[derive]: zerocopy_derive::KnownLayout")]
730#[cfg_attr(
731    not(feature = "derive"),
732    doc = concat!("[derive]: https://docs.rs/zerocopy/", env!("CARGO_PKG_VERSION"), "/zerocopy/derive.KnownLayout.html"),
733)]
734#[cfg_attr(
735    not(no_zerocopy_diagnostic_on_unimplemented_1_78_0),
736    diagnostic::on_unimplemented(note = "Consider adding `#[derive(KnownLayout)]` to `{Self}`")
737)]
738pub unsafe trait KnownLayout {
739    // The `Self: Sized` bound makes it so that `KnownLayout` can still be
740    // object safe. It's not currently object safe thanks to `const LAYOUT`, and
741    // it likely won't be in the future, but there's no reason not to be
742    // forwards-compatible with object safety.
743    #[doc(hidden)]
744    fn only_derive_is_allowed_to_implement_this_trait()
745    where
746        Self: Sized;
747
748    /// The type of metadata stored in a pointer to `Self`.
749    ///
750    /// This is `()` for sized types and [`usize`] for slice DSTs.
751    type PointerMetadata: PointerMetadata;
752
753    /// A maybe-uninitialized analog of `Self`
754    ///
755    /// # Safety
756    ///
757    /// `Self::LAYOUT` and `Self::MaybeUninit::LAYOUT` are identical.
758    /// `Self::MaybeUninit` admits uninitialized bytes in all positions.
759    #[doc(hidden)]
760    type MaybeUninit: ?Sized + KnownLayout<PointerMetadata = Self::PointerMetadata>;
761
762    /// The layout of `Self`.
763    ///
764    /// # Safety
765    ///
766    /// Callers may assume that `LAYOUT` accurately reflects the layout of
767    /// `Self`. In particular:
768    /// - `LAYOUT.align` is equal to `Self`'s alignment
769    /// - If `Self: Sized`, then `LAYOUT.size_info == SizeInfo::Sized { size }`
770    ///   where `size == size_of::<Self>()`
771    /// - If `Self` is a slice DST, then `LAYOUT.size_info ==
772    ///   SizeInfo::SliceDst(slice_layout)` where:
773    ///   - The size, `size`, of an instance of `Self` with `elems` trailing
774    ///     slice elements is equal to `slice_layout.offset +
775    ///     slice_layout.elem_size * elems` rounded up to the nearest multiple
776    ///     of `LAYOUT.align`
777    ///   - For such an instance, any bytes in the range `[slice_layout.offset +
778    ///     slice_layout.elem_size * elems, size)` are padding and must not be
779    ///     assumed to be initialized
780    #[doc(hidden)]
781    const LAYOUT: DstLayout;
782
783    /// SAFETY: The returned pointer has the same address and provenance as
784    /// `bytes`. If `Self` is a DST, the returned pointer's referent has `elems`
785    /// elements in its trailing slice.
786    #[doc(hidden)]
787    fn raw_from_ptr_len(bytes: NonNull<u8>, meta: Self::PointerMetadata) -> NonNull<Self>;
788
789    /// Extracts the metadata from a pointer to `Self`.
790    ///
791    /// # Safety
792    ///
793    /// `pointer_to_metadata` always returns the correct metadata stored in
794    /// `ptr`.
795    #[doc(hidden)]
796    fn pointer_to_metadata(ptr: *mut Self) -> Self::PointerMetadata;
797
798    /// Computes the length of the byte range addressed by `ptr`.
799    ///
800    /// Returns `None` if the resulting length would not fit in an `usize`.
801    ///
802    /// # Safety
803    ///
804    /// Callers may assume that `size_of_val_raw` always returns the correct
805    /// size.
806    ///
807    /// Callers may assume that, if `ptr` addresses a byte range whose length
808    /// fits in an `usize`, this will return `Some`.
809    #[doc(hidden)]
810    #[must_use]
811    #[inline(always)]
812    fn size_of_val_raw(ptr: NonNull<Self>) -> Option<usize> {
813        let meta = Self::pointer_to_metadata(ptr.as_ptr());
814        // SAFETY: `size_for_metadata` promises to only return `None` if the
815        // resulting size would not fit in a `usize`.
816        Self::size_for_metadata(meta)
817    }
818
819    #[doc(hidden)]
820    #[must_use]
821    #[inline(always)]
822    fn raw_dangling() -> NonNull<Self> {
823        let meta = Self::PointerMetadata::from_elem_count(0);
824        Self::raw_from_ptr_len(NonNull::dangling(), meta)
825    }
826
    /// Computes the size of an object of type `Self` with the given pointer
    /// metadata.
    ///
    /// # Safety
    ///
    /// `size_for_metadata` promises to return `None` if and only if the
    /// resulting size would not fit in a [`usize`]. Note that the returned size
    /// could exceed the actual maximum valid size of an allocated object,
    /// [`isize::MAX`].
    ///
    /// # Examples
    ///
    /// ```
    /// use zerocopy::KnownLayout;
    ///
    /// assert_eq!(u8::size_for_metadata(()), Some(1));
    /// assert_eq!(u16::size_for_metadata(()), Some(2));
    /// assert_eq!(<[u8]>::size_for_metadata(42), Some(42));
    /// assert_eq!(<[u16]>::size_for_metadata(42), Some(84));
    ///
    /// // This size exceeds the maximum valid object size (`isize::MAX`):
    /// assert_eq!(<[u8]>::size_for_metadata(usize::MAX), Some(usize::MAX));
    ///
    /// // This size, if computed, would exceed `usize::MAX`:
    /// assert_eq!(<[u16]>::size_for_metadata(usize::MAX), None);
    /// ```
    #[inline(always)]
    fn size_for_metadata(meta: Self::PointerMetadata) -> Option<usize> {
        // Delegates to `PointerMetadata::size_for_metadata`, whose
        // implementations uphold the `None`-only-on-overflow promise above.
        meta.size_for_metadata(Self::LAYOUT)
    }
857}
858
859/// Efficiently produces the [`TrailingSliceLayout`] of `T`.
860#[inline(always)]
861pub(crate) fn trailing_slice_layout<T>() -> TrailingSliceLayout
862where
863    T: ?Sized + KnownLayout<PointerMetadata = usize>,
864{
865    trait LayoutFacts {
866        const SIZE_INFO: TrailingSliceLayout;
867    }
868
869    impl<T: ?Sized> LayoutFacts for T
870    where
871        T: KnownLayout<PointerMetadata = usize>,
872    {
873        const SIZE_INFO: TrailingSliceLayout = match T::LAYOUT.size_info {
874            crate::SizeInfo::Sized { .. } => const_panic!("unreachable"),
875            crate::SizeInfo::SliceDst(info) => info,
876        };
877    }
878
879    T::SIZE_INFO
880}
881
/// The metadata associated with a [`KnownLayout`] type.
///
/// Only two implementations currently exist: `()` (the metadata of sized
/// types) and `usize` (the element count of slice DSTs).
#[doc(hidden)]
pub trait PointerMetadata: Copy + Eq + Debug {
    /// Constructs a `Self` from an element count.
    ///
    /// If `Self = ()`, this returns `()`. If `Self = usize`, this returns
    /// `elems`. No other types are currently supported.
    fn from_elem_count(elems: usize) -> Self;

    /// Converts `self` to an element count.
    ///
    /// If `Self = ()`, this returns `0`. If `Self = usize`, this returns
    /// `self`. No other types are currently supported.
    fn to_elem_count(self) -> usize;

    /// Computes the size of the object with the given layout and pointer
    /// metadata.
    ///
    /// # Panics
    ///
    /// If `Self = ()`, `layout` must describe a sized type. If `Self = usize`,
    /// `layout` must describe a slice DST. Otherwise, `size_for_metadata` may
    /// panic.
    ///
    /// # Safety
    ///
    /// `size_for_metadata` promises to only return `None` if the resulting size
    /// would not fit in a `usize`.
    fn size_for_metadata(self, layout: DstLayout) -> Option<usize>;
}
912
913impl PointerMetadata for () {
914    #[inline]
915    #[allow(clippy::unused_unit)]
916    fn from_elem_count(_elems: usize) -> () {}
917
918    #[inline]
919    fn to_elem_count(self) -> usize {
920        0
921    }
922
923    #[inline]
924    fn size_for_metadata(self, layout: DstLayout) -> Option<usize> {
925        match layout.size_info {
926            SizeInfo::Sized { size } => Some(size),
927            // NOTE: This branch is unreachable, but we return `None` rather
928            // than `unreachable!()` to avoid generating panic paths.
929            SizeInfo::SliceDst(_) => None,
930        }
931    }
932}
933
934impl PointerMetadata for usize {
935    #[inline]
936    fn from_elem_count(elems: usize) -> usize {
937        elems
938    }
939
940    #[inline]
941    fn to_elem_count(self) -> usize {
942        self
943    }
944
945    #[inline]
946    fn size_for_metadata(self, layout: DstLayout) -> Option<usize> {
947        match layout.size_info {
948            SizeInfo::SliceDst(TrailingSliceLayout { offset, elem_size }) => {
949                let slice_len = elem_size.checked_mul(self)?;
950                let without_padding = offset.checked_add(slice_len)?;
951                without_padding.checked_add(util::padding_needed_for(without_padding, layout.align))
952            }
953            // NOTE: This branch is unreachable, but we return `None` rather
954            // than `unreachable!()` to avoid generating panic paths.
955            SizeInfo::Sized { .. } => None,
956        }
957    }
958}
959
// SAFETY: Delegates safety to `DstLayout::for_slice`.
unsafe impl<T> KnownLayout for [T] {
    #[allow(clippy::missing_inline_in_public_items, dead_code)]
    #[cfg_attr(
        all(coverage_nightly, __ZEROCOPY_INTERNAL_USE_ONLY_NIGHTLY_FEATURES_IN_TESTS),
        coverage(off)
    )]
    fn only_derive_is_allowed_to_implement_this_trait()
    where
        Self: Sized,
    {
    }

    // A `[T]`'s pointer metadata is its element count.
    type PointerMetadata = usize;

    // SAFETY: `CoreMaybeUninit<T>::LAYOUT` and `T::LAYOUT` are identical
    // because `CoreMaybeUninit<T>` has the same size and alignment as `T` [1].
    // Consequently, `[CoreMaybeUninit<T>]::LAYOUT` and `[T]::LAYOUT` are
    // identical, because they both lack a fixed-sized prefix and because they
    // inherit the alignments of their inner element type (which are identical)
    // [2][3].
    //
    // `[CoreMaybeUninit<T>]` admits uninitialized bytes at all positions
    // because `CoreMaybeUninit<T>` admits uninitialized bytes at all positions
    // and because the inner elements of `[CoreMaybeUninit<T>]` are laid out
    // back-to-back [2][3].
    //
    // [1] Per https://doc.rust-lang.org/1.81.0/std/mem/union.MaybeUninit.html#layout-1:
    //
    //   `MaybeUninit<T>` is guaranteed to have the same size, alignment, and ABI as
    //   `T`
    //
    // [2] Per https://doc.rust-lang.org/1.82.0/reference/type-layout.html#slice-layout:
    //
    //   Slices have the same layout as the section of the array they slice.
    //
    // [3] Per https://doc.rust-lang.org/1.82.0/reference/type-layout.html#array-layout:
    //
    //   An array of `[T; N]` has a size of `size_of::<T>() * N` and the same
    //   alignment of `T`. Arrays are laid out so that the zero-based `nth`
    //   element of the array is offset from the start of the array by `n *
    //   size_of::<T>()` bytes.
    type MaybeUninit = [CoreMaybeUninit<T>];

    const LAYOUT: DstLayout = DstLayout::for_slice::<T>();

    // SAFETY: `.cast` preserves address and provenance. The returned pointer
    // refers to an object with `elems` elements by construction.
    #[inline(always)]
    fn raw_from_ptr_len(data: NonNull<u8>, elems: usize) -> NonNull<Self> {
        // FIXME(#67): Remove this allow. See NonNullExt for more details.
        #[allow(unstable_name_collisions)]
        NonNull::slice_from_raw_parts(data.cast::<T>(), elems)
    }

    #[inline(always)]
    fn pointer_to_metadata(ptr: *mut [T]) -> usize {
        // Cast to a slice of zero-sized elements so that a reference can be
        // materialized (below) regardless of `T`'s alignment.
        #[allow(clippy::as_conversions)]
        let slc = ptr as *const [()];

        // SAFETY:
        // - `()` has alignment 1, so `slc` is trivially aligned.
        // - `slc` was derived from a non-null pointer.
        // - The size is 0 regardless of the length, so it is sound to
        //   materialize a reference regardless of location.
        // - `ptr` is assumed to have valid provenance.
        let slc = unsafe { &*slc };

        // This is correct because the preceding `as` cast preserves the number
        // of slice elements. [1]
        //
        // [1] Per https://doc.rust-lang.org/reference/expressions/operator-expr.html#pointer-to-pointer-cast:
        //
        //   For slice types like `[T]` and `[U]`, the raw pointer types `*const
        //   [T]`, `*mut [T]`, `*const [U]`, and `*mut [U]` encode the number of
        //   elements in this slice. Casts between these raw pointer types
        //   preserve the number of elements. ... The same holds for `str` and
        //   any compound type whose unsized tail is a slice type, such as
        //   struct `Foo(i32, [u8])` or `(u64, Foo)`.
        slc.len()
    }
}
1042
// `KnownLayout` for the primitive numeric, boolean, and character types and
// their `NonZero` counterparts; all of these are sized.
#[rustfmt::skip]
impl_known_layout!(
    (),
    u8, i8, u16, i16, u32, i32, u64, i64, u128, i128, usize, isize, f32, f64,
    bool, char,
    NonZeroU8, NonZeroI8, NonZeroU16, NonZeroI16, NonZeroU32, NonZeroI32,
    NonZeroU64, NonZeroI64, NonZeroU128, NonZeroI128, NonZeroUsize, NonZeroIsize
);
// The nightly-only `f16` and `f128` primitives, gated on the `float-nightly`
// feature.
#[rustfmt::skip]
#[cfg(feature = "float-nightly")]
impl_known_layout!(
    #[cfg_attr(doc_cfg, doc(cfg(feature = "float-nightly")))]
    f16,
    #[cfg_attr(doc_cfg, doc(cfg(feature = "float-nightly")))]
    f128
);
// Generic wrapper and pointer types.
#[rustfmt::skip]
impl_known_layout!(
    T         => Option<T>,
    T: ?Sized => PhantomData<T>,
    T         => Wrapping<T>,
    T         => CoreMaybeUninit<T>,
    T: ?Sized => *const T,
    T: ?Sized => *mut T,
    T: ?Sized => &'_ T,
    T: ?Sized => &'_ mut T,
);
// Arrays of any length, for any element type.
impl_known_layout!(const N: usize, T => [T; N]);
1071
// SAFETY: `str` has the same representation as `[u8]`. `ManuallyDrop<T>` [1],
// `UnsafeCell<T>` [2], and `Cell<T>` [3] have the same representation as `T`.
//
// [1] Per https://doc.rust-lang.org/1.85.0/std/mem/struct.ManuallyDrop.html:
//
//   `ManuallyDrop<T>` is guaranteed to have the same layout and bit validity as
//   `T`
//
// [2] Per https://doc.rust-lang.org/1.85.0/core/cell/struct.UnsafeCell.html#memory-layout:
//
//   `UnsafeCell<T>` has the same in-memory representation as its inner type
//   `T`.
//
// [3] Per https://doc.rust-lang.org/1.85.0/core/cell/struct.Cell.html#memory-layout:
//
//   `Cell<T>` has the same in-memory representation as `T`.
//
// NOTE(review): the `#[repr(...)]` argument to `unsafe_impl_known_layout!`
// appears to name the type whose layout is forwarded to the implementing type
// — confirm against the macro definition.
#[allow(clippy::multiple_unsafe_ops_per_block)]
const _: () = unsafe {
    unsafe_impl_known_layout!(
        #[repr([u8])]
        str
    );
    unsafe_impl_known_layout!(T: ?Sized + KnownLayout => #[repr(T)] ManuallyDrop<T>);
    unsafe_impl_known_layout!(T: ?Sized + KnownLayout => #[repr(T)] UnsafeCell<T>);
    unsafe_impl_known_layout!(T: ?Sized + KnownLayout => #[repr(T)] Cell<T>);
};
1098
// SAFETY:
// - By consequence of the invariant on `T::MaybeUninit` that `T::LAYOUT` and
//   `T::MaybeUninit::LAYOUT` are equal, `T` and `T::MaybeUninit` have the same:
//   - Fixed prefix size
//   - Alignment
//   - (For DSTs) trailing slice element size
// - By consequence of the above, referents `T::MaybeUninit` and `T` require
//   the same kind of pointer metadata, and thus it is valid to perform an `as`
//   cast from `*mut T` to `*mut T::MaybeUninit`, and this operation preserves
//   referent size (ie, `size_of_val_raw`).
const _: () = unsafe {
    unsafe_impl_known_layout!(T: ?Sized + KnownLayout => #[repr(T::MaybeUninit)] MaybeUninit<T>)
};
1112
// FIXME(#196, #2856): Eventually, we'll want to support enums variants and
// union fields being treated uniformly since they behave similarly to each
// other in terms of projecting validity – specifically, for a type `T` with
// validity `V`, if `T` is a struct type, then its fields straightforwardly also
// have validity `V`. By contrast, if `T` is an enum or union type, then
// validity is not straightforwardly recursive in this way.
//
// Sentinel `VARIANT_ID` values used by `HasField`: non-negative IDs denote
// enum variant indices, while these negative IDs denote struct and union
// "variants" (see the `HasField` safety docs).
#[doc(hidden)]
pub const STRUCT_VARIANT_ID: i128 = -1;
#[doc(hidden)]
pub const UNION_VARIANT_ID: i128 = -2;
// NOTE(review): presumably distinguishes `#[repr(C)]` unions from other
// unions — confirm against `HasField` derive internals.
#[doc(hidden)]
pub const REPR_C_UNION_VARIANT_ID: i128 = -3;
1125
/// # Safety
///
/// `Self::ProjectToTag` must satisfy its safety invariant.
#[doc(hidden)]
pub unsafe trait HasTag {
    // Implemented only by the derive; the `Self: Sized` bound keeps the trait
    // object safe.
    fn only_derive_is_allowed_to_implement_this_trait()
    where
        Self: Sized;

    /// The type's enum tag, or `()` for non-enum types.
    type Tag: Immutable;

    /// A pointer projection from `Self` to its tag.
    ///
    /// # Safety
    ///
    /// It must be the case that, for all `slf: Ptr<'_, Self, I>`, it is sound
    /// to project from `slf` to `Ptr<'_, Self::Tag, I>` using this projection.
    type ProjectToTag: pointer::cast::Project<Self, Self::Tag>;
}
1146
/// Projects a given field from `Self`.
///
/// All implementations of `HasField` for a particular field `f` in `Self`
/// should use the same `Field` type; this ensures that `Field` is inferable
/// given an explicit `VARIANT_ID` and `FIELD_ID`.
///
/// # Safety
///
/// A field `f` is `HasField` for `Self` if and only if:
///
/// - If `Self` has the layout of a struct or union type, then `VARIANT_ID` is
///   `STRUCT_VARIANT_ID` or `UNION_VARIANT_ID` respectively; otherwise, if
///   `Self` has the layout of an enum type, `VARIANT_ID` is the numerical index
///   of the enum variant in which `f` appears. Note that `Self` does not need
///   to actually *be* such a type – it just needs to have the same layout as
///   such a type. For example, a `#[repr(transparent)]` wrapper around an enum
///   has the same layout as that enum.
/// - If `f` has name `n`, `FIELD_ID` is `zerocopy::ident_id!(n)`; otherwise,
///   if `f` is at index `i`, `FIELD_ID` is `zerocopy::ident_id!(i)`.
/// - `Field` is a type with the same visibility as `f`.
/// - `Type` has the same type as `f`.
///
/// The caller must **not** assume that a pointer's referent being aligned
/// implies that calling `project` on that pointer will result in a pointer to
/// an aligned referent. For example, `HasField` may be implemented for
/// `#[repr(packed)]` structs.
///
/// The implementation of `project` must satisfy its safety post-condition.
#[doc(hidden)]
pub unsafe trait HasField<Field, const VARIANT_ID: i128, const FIELD_ID: i128>:
    HasTag
{
    // Implemented only by the derive; the `Self: Sized` bound keeps the trait
    // object safe.
    fn only_derive_is_allowed_to_implement_this_trait()
    where
        Self: Sized;

    /// The type of the field.
    type Type: ?Sized;

    /// Projects from `slf` to the field.
    ///
    /// Users should generally not call `project` directly, and instead should
    /// use high-level APIs like [`PtrInner::project`] or [`Ptr::project`].
    ///
    /// # Safety
    ///
    /// The returned pointer refers to a non-strict subset of the bytes of
    /// `slf`'s referent, and has the same provenance as `slf`.
    #[must_use]
    fn project(slf: PtrInner<'_, Self>) -> *mut Self::Type;
}
1198
/// Projects a given field from `Self`.
///
/// Implementations of this trait encode the conditions under which a field can
/// be projected from a `Ptr<'_, Self, I>`, and how the invariants of that
/// [`Ptr`] (`I`) determine the invariants of pointers projected from it. In
/// other words, it is a type-level function over invariants; `I` goes in,
/// `Self::Invariants` comes out.
///
/// # Safety
///
/// `T: ProjectField<Field, I, VARIANT_ID, FIELD_ID>` if, for a
/// `ptr: Ptr<'_, T, I>` such that `T::is_projectable(ptr).is_ok()`,
/// `<T as HasField<Field, VARIANT_ID, FIELD_ID>>::project(ptr.as_inner())`
/// conforms to `T::Invariants`.
#[doc(hidden)]
pub unsafe trait ProjectField<Field, I, const VARIANT_ID: i128, const FIELD_ID: i128>:
    HasField<Field, VARIANT_ID, FIELD_ID>
where
    I: invariant::Invariants,
{
    fn only_derive_is_allowed_to_implement_this_trait()
    where
        Self: Sized;

    /// The invariants of the projected field pointer, with respect to the
    /// invariants, `I`, of the containing pointer. The aliasing dimension of
    /// the invariants is guaranteed to remain unchanged.
    type Invariants: invariant::Invariants<Aliasing = I::Aliasing>;

    /// The failure mode of projection. `()` if the projection is fallible,
    /// otherwise [`core::convert::Infallible`].
    type Error;

    /// Is the given field projectable from `ptr`?
    ///
    /// If a field with [`Self::Invariants`] is projectable from the referent,
    /// this function produces an `Ok(ptr)` from which the projection can be
    /// made; otherwise `Err`.
    ///
    /// This method must be overridden if the field's projectability depends on
    /// the value of the bytes in `ptr`.
    #[inline(always)]
    fn is_projectable<'a>(_ptr: Ptr<'a, Self::Tag, I>) -> Result<(), Self::Error> {
        trait IsInfallible {
            const IS_INFALLIBLE: bool;
        }

        struct Projection<T, Field, I, const VARIANT_ID: i128, const FIELD_ID: i128>(
            PhantomData<(Field, I, T)>,
        )
        where
            T: ?Sized + HasField<Field, VARIANT_ID, FIELD_ID>,
            I: invariant::Invariants;

        impl<T, Field, I, const VARIANT_ID: i128, const FIELD_ID: i128> IsInfallible
            for Projection<T, Field, I, VARIANT_ID, FIELD_ID>
        where
            T: ?Sized + HasField<Field, VARIANT_ID, FIELD_ID>,
            I: invariant::Invariants,
        {
            const IS_INFALLIBLE: bool = {
                let is_infallible = match VARIANT_ID {
                    // For nondestructive projections of struct and union
                    // fields, the projected field's satisfaction of
                    // `Invariants` does not depend on the value of the
                    // referent. This default implementation of `is_projectable`
                    // is non-destructive, as it does not overwrite any part of
                    // the referent.
                    crate::STRUCT_VARIANT_ID | crate::UNION_VARIANT_ID => true,
                    _enum_variant => {
                        use crate::invariant::{Validity, ValidityKind};
                        match I::Validity::KIND {
                            // The `Uninit` and `Initialized` validity
                            // invariants do not depend on the enum's tag. In
                            // particular, we don't actually care about what
                            // variant is present – we can treat *any* range of
                            // uninitialized or initialized memory as containing
                            // an uninitialized or initialized instance of *any*
                            // type – the type itself is irrelevant.
                            ValidityKind::Uninit | ValidityKind::Initialized => true,
                            // The projectability of an enum field from an
                            // `AsInitialized` or `Valid` state is a dynamic
                            // property of its tag.
                            ValidityKind::AsInitialized | ValidityKind::Valid => false,
                        }
                    }
                };
                const_assert!(is_infallible);
                is_infallible
            };
        }

        // This fails to compile unless the projection is provably infallible,
        // forcing fallible projections to override this default impl.
        const_assert!(
            <Projection<Self, Field, I, VARIANT_ID, FIELD_ID> as IsInfallible>::IS_INFALLIBLE
        );

        Ok(())
    }
}
1298
1299/// Analyzes whether a type is [`FromZeros`].
1300///
1301/// This derive analyzes, at compile time, whether the annotated type satisfies
1302/// the [safety conditions] of `FromZeros` and implements `FromZeros` and its
1303/// supertraits if it is sound to do so. This derive can be applied to structs,
1304/// enums, and unions; e.g.:
1305///
1306/// ```
1307/// # use zerocopy_derive::{FromZeros, Immutable};
1308/// #[derive(FromZeros)]
1309/// struct MyStruct {
1310/// # /*
1311///     ...
1312/// # */
1313/// }
1314///
1315/// #[derive(FromZeros)]
1316/// #[repr(u8)]
1317/// enum MyEnum {
1318/// #   Variant0,
1319/// # /*
1320///     ...
1321/// # */
1322/// }
1323///
1324/// #[derive(FromZeros, Immutable)]
1325/// union MyUnion {
1326/// #   variant: u8,
1327/// # /*
1328///     ...
1329/// # */
1330/// }
1331/// ```
1332///
1333/// [safety conditions]: trait@FromZeros#safety
1334///
1335/// # Analysis
1336///
1337/// *This section describes, roughly, the analysis performed by this derive to
1338/// determine whether it is sound to implement `FromZeros` for a given type.
1339/// Unless you are modifying the implementation of this derive, or attempting to
1340/// manually implement `FromZeros` for a type yourself, you don't need to read
1341/// this section.*
1342///
1343/// If a type has the following properties, then this derive can implement
1344/// `FromZeros` for that type:
1345///
1346/// - If the type is a struct, all of its fields must be `FromZeros`.
1347/// - If the type is an enum:
1348///   - It must have a defined representation (`repr`s `C`, `u8`, `u16`, `u32`,
1349///     `u64`, `usize`, `i8`, `i16`, `i32`, `i64`, or `isize`).
1350///   - It must have a variant with a discriminant/tag of `0`, and its fields
///     must be `FromZeros`. See [the reference] for a description of how
///     discriminant values are specified.
1353///   - The fields of that variant must be `FromZeros`.
1354///
1355/// This analysis is subject to change. Unsafe code may *only* rely on the
1356/// documented [safety conditions] of `FromZeros`, and must *not* rely on the
1357/// implementation details of this derive.
1358///
1359/// [the reference]: https://doc.rust-lang.org/reference/items/enumerations.html#custom-discriminant-values-for-fieldless-enumerations
1360///
1361/// ## Why isn't an explicit representation required for structs?
1362///
1363/// Neither this derive, nor the [safety conditions] of `FromZeros`, requires
1364/// that structs are marked with `#[repr(C)]`.
1365///
/// Per the [Rust reference][reference],
1367///
1368/// > The representation of a type can change the padding between fields, but
1369/// > does not change the layout of the fields themselves.
1370///
1371/// [reference]: https://doc.rust-lang.org/reference/type-layout.html#representations
1372///
1373/// Since the layout of structs only consists of padding bytes and field bytes,
1374/// a struct is soundly `FromZeros` if:
1375/// 1. its padding is soundly `FromZeros`, and
1376/// 2. its fields are soundly `FromZeros`.
1377///
1378/// The answer to the first question is always yes: padding bytes do not have
1379/// any validity constraints. A [discussion] of this question in the Unsafe Code
1380/// Guidelines Working Group concluded that it would be virtually unimaginable
1381/// for future versions of rustc to add validity constraints to padding bytes.
1382///
1383/// [discussion]: https://github.com/rust-lang/unsafe-code-guidelines/issues/174
1384///
1385/// Whether a struct is soundly `FromZeros` therefore solely depends on whether
1386/// its fields are `FromZeros`.
1387// FIXME(#146): Document why we don't require an enum to have an explicit `repr`
1388// attribute.
1389#[cfg(any(feature = "derive", test))]
1390#[cfg_attr(doc_cfg, doc(cfg(feature = "derive")))]
1391pub use zerocopy_derive::FromZeros;
1392/// Analyzes whether a type is [`Immutable`].
1393///
1394/// This derive analyzes, at compile time, whether the annotated type satisfies
1395/// the [safety conditions] of `Immutable` and implements `Immutable` if it is
1396/// sound to do so. This derive can be applied to structs, enums, and unions;
1397/// e.g.:
1398///
1399/// ```
1400/// # use zerocopy_derive::Immutable;
1401/// #[derive(Immutable)]
1402/// struct MyStruct {
1403/// # /*
1404///     ...
1405/// # */
1406/// }
1407///
1408/// #[derive(Immutable)]
1409/// enum MyEnum {
1410/// #   Variant0,
1411/// # /*
1412///     ...
1413/// # */
1414/// }
1415///
1416/// #[derive(Immutable)]
1417/// union MyUnion {
1418/// #   variant: u8,
1419/// # /*
1420///     ...
1421/// # */
1422/// }
1423/// ```
1424///
1425/// # Analysis
1426///
1427/// *This section describes, roughly, the analysis performed by this derive to
1428/// determine whether it is sound to implement `Immutable` for a given type.
1429/// Unless you are modifying the implementation of this derive, you don't need
1430/// to read this section.*
1431///
1432/// If a type has the following properties, then this derive can implement
1433/// `Immutable` for that type:
1434///
1435/// - All fields must be `Immutable`.
1436///
1437/// This analysis is subject to change. Unsafe code may *only* rely on the
1438/// documented [safety conditions] of `Immutable`, and must *not* rely on the
1439/// implementation details of this derive.
1440///
1441/// [safety conditions]: trait@Immutable#safety
1442#[cfg(any(feature = "derive", test))]
1443#[cfg_attr(doc_cfg, doc(cfg(feature = "derive")))]
1444pub use zerocopy_derive::Immutable;
1445
/// Types which are free from interior mutability.
///
/// `T: Immutable` indicates that `T` does not permit interior mutation, except
/// by ownership or an exclusive (`&mut`) borrow.
///
/// # Implementation
///
/// **Do not implement this trait yourself!** Instead, use
/// [`#[derive(Immutable)]`][derive] (requires the `derive` Cargo feature);
/// e.g.:
///
/// ```
/// # use zerocopy_derive::Immutable;
/// #[derive(Immutable)]
/// struct MyStruct {
/// # /*
///     ...
/// # */
/// }
///
/// #[derive(Immutable)]
/// enum MyEnum {
/// # /*
///     ...
/// # */
/// }
///
/// #[derive(Immutable)]
/// union MyUnion {
/// #   variant: u8,
/// # /*
///     ...
/// # */
/// }
/// ```
///
/// This derive performs a sophisticated, compile-time safety analysis to
/// determine whether a type is `Immutable`.
///
/// # Safety
///
/// Unsafe code outside of this crate must not make any assumptions about `T`
/// based on `T: Immutable`. We reserve the right to relax the requirements for
/// `Immutable` in the future, and if unsafe code outside of this crate makes
/// assumptions based on `T: Immutable`, future relaxations may cause that code
/// to become unsound.
///
// # Safety (Internal)
//
// If `T: Immutable`, unsafe code *inside of this crate* may assume that, given
// `t: &T`, `t` does not permit interior mutation of its referent. Because
// [`UnsafeCell`] is the only type which permits interior mutation, it is
// sufficient (though not necessary) to guarantee that `T` contains no
// `UnsafeCell`s.
//
// [`UnsafeCell`]: core::cell::UnsafeCell
//
// The following `cfg_attr`s resolve the `[derive]` doc links either to the
// `zerocopy_derive` item or to a docs.rs URL, depending on whether the
// `derive` feature is enabled.
#[cfg_attr(
    feature = "derive",
    doc = "[derive]: zerocopy_derive::Immutable",
    doc = "[derive-analysis]: zerocopy_derive::Immutable#analysis"
)]
#[cfg_attr(
    not(feature = "derive"),
    doc = concat!("[derive]: https://docs.rs/zerocopy/", env!("CARGO_PKG_VERSION"), "/zerocopy/derive.Immutable.html"),
    doc = concat!("[derive-analysis]: https://docs.rs/zerocopy/", env!("CARGO_PKG_VERSION"), "/zerocopy/derive.Immutable.html#analysis"),
)]
#[cfg_attr(
    not(no_zerocopy_diagnostic_on_unimplemented_1_78_0),
    diagnostic::on_unimplemented(note = "Consider adding `#[derive(Immutable)]` to `{Self}`")
)]
pub unsafe trait Immutable {
    // The `Self: Sized` bound makes it so that `Immutable` is still object
    // safe.
    #[doc(hidden)]
    fn only_derive_is_allowed_to_implement_this_trait()
    where
        Self: Sized;
}
1524
1525/// Implements [`TryFromBytes`].
1526///
1527/// This derive synthesizes the runtime checks required to check whether a
1528/// sequence of initialized bytes corresponds to a valid instance of a type.
1529/// This derive can be applied to structs, enums, and unions; e.g.:
1530///
1531/// ```
1532/// # use zerocopy_derive::{TryFromBytes, Immutable};
1533/// #[derive(TryFromBytes)]
1534/// struct MyStruct {
1535/// # /*
1536///     ...
1537/// # */
1538/// }
1539///
1540/// #[derive(TryFromBytes)]
1541/// #[repr(u8)]
1542/// enum MyEnum {
1543/// #   V00,
1544/// # /*
1545///     ...
1546/// # */
1547/// }
1548///
1549/// #[derive(TryFromBytes, Immutable)]
1550/// union MyUnion {
1551/// #   variant: u8,
1552/// # /*
1553///     ...
1554/// # */
1555/// }
1556/// ```
1557///
1558/// # Portability
1559///
1560/// To ensure consistent endianness for enums with multi-byte representations,
1561/// explicitly specify and convert each discriminant using `.to_le()` or
1562/// `.to_be()`; e.g.:
1563///
1564/// ```
1565/// # use zerocopy_derive::TryFromBytes;
1566/// // `DataStoreVersion` is encoded in little-endian.
1567/// #[derive(TryFromBytes)]
1568/// #[repr(u32)]
1569/// pub enum DataStoreVersion {
1570///     /// Version 1 of the data store.
1571///     V1 = 9u32.to_le(),
1572///
1573///     /// Version 2 of the data store.
1574///     V2 = 10u32.to_le(),
1575/// }
1576/// ```
1577///
1578/// [safety conditions]: trait@TryFromBytes#safety
1579#[cfg(any(feature = "derive", test))]
1580#[cfg_attr(doc_cfg, doc(cfg(feature = "derive")))]
1581pub use zerocopy_derive::TryFromBytes;
1582
1583/// Types for which some bit patterns are valid.
1584///
1585/// A memory region of the appropriate length which contains initialized bytes
1586/// can be viewed as a `TryFromBytes` type so long as the runtime value of those
1587/// bytes corresponds to a [*valid instance*] of that type. For example,
1588/// [`bool`] is `TryFromBytes`, so zerocopy can transmute a [`u8`] into a
1589/// [`bool`] so long as it first checks that the value of the [`u8`] is `0` or
1590/// `1`.
1591///
1592/// # Implementation
1593///
1594/// **Do not implement this trait yourself!** Instead, use
1595/// [`#[derive(TryFromBytes)]`][derive]; e.g.:
1596///
1597/// ```
1598/// # use zerocopy_derive::{TryFromBytes, Immutable};
1599/// #[derive(TryFromBytes)]
1600/// struct MyStruct {
1601/// # /*
1602///     ...
1603/// # */
1604/// }
1605///
1606/// #[derive(TryFromBytes)]
1607/// #[repr(u8)]
1608/// enum MyEnum {
1609/// #   V00,
1610/// # /*
1611///     ...
1612/// # */
1613/// }
1614///
1615/// #[derive(TryFromBytes, Immutable)]
1616/// union MyUnion {
1617/// #   variant: u8,
1618/// # /*
1619///     ...
1620/// # */
1621/// }
1622/// ```
1623///
1624/// This derive ensures that the runtime check of whether bytes correspond to a
1625/// valid instance is sound. You **must** implement this trait via the derive.
1626///
1627/// # What is a "valid instance"?
1628///
1629/// In Rust, each type has *bit validity*, which refers to the set of bit
1630/// patterns which may appear in an instance of that type. It is impossible for
/// safe Rust code to produce values which violate bit validity (i.e., values
1632/// outside of the "valid" set of bit patterns). If `unsafe` code produces an
1633/// invalid value, this is considered [undefined behavior].
1634///
1635/// Rust's bit validity rules are currently being decided, which means that some
1636/// types have three classes of bit patterns: those which are definitely valid,
1637/// and whose validity is documented in the language; those which may or may not
1638/// be considered valid at some point in the future; and those which are
1639/// definitely invalid.
1640///
1641/// Zerocopy takes a conservative approach, and only considers a bit pattern to
1642/// be valid if its validity is a documented guarantee provided by the
1643/// language.
1644///
1645/// For most use cases, Rust's current guarantees align with programmers'
1646/// intuitions about what ought to be valid. As a result, zerocopy's
1647/// conservatism should not affect most users.
1648///
1649/// If you are negatively affected by lack of support for a particular type,
1650/// we encourage you to let us know by [filing an issue][github-repo].
1651///
1652/// # `TryFromBytes` is not symmetrical with [`IntoBytes`]
1653///
1654/// There are some types which implement both `TryFromBytes` and [`IntoBytes`],
1655/// but for which `TryFromBytes` is not guaranteed to accept all byte sequences
1656/// produced by `IntoBytes`. In other words, for some `T: TryFromBytes +
1657/// IntoBytes`, there exist values of `t: T` such that
/// `TryFromBytes::try_ref_from_bytes(t.as_bytes())` returns `Err`. Code
/// should not generally assume that values produced by `IntoBytes` will
/// necessarily be accepted as valid by `TryFromBytes`.
1661///
1662/// # Safety
1663///
1664/// On its own, `T: TryFromBytes` does not make any guarantees about the layout
1665/// or representation of `T`. It merely provides the ability to perform a
1666/// validity check at runtime via methods like [`try_ref_from_bytes`].
1667///
1668/// You must not rely on the `#[doc(hidden)]` internals of `TryFromBytes`.
1669/// Future releases of zerocopy may make backwards-breaking changes to these
1670/// items, including changes that only affect soundness, which may cause code
1671/// which uses those items to silently become unsound.
1672///
1673/// [undefined behavior]: https://raphlinus.github.io/programming/rust/2018/08/17/undefined-behavior.html
1674/// [github-repo]: https://github.com/google/zerocopy
1675/// [`try_ref_from_bytes`]: TryFromBytes::try_ref_from_bytes
1676/// [*valid instance*]: #what-is-a-valid-instance
1677#[cfg_attr(feature = "derive", doc = "[derive]: zerocopy_derive::TryFromBytes")]
1678#[cfg_attr(
1679    not(feature = "derive"),
1680    doc = concat!("[derive]: https://docs.rs/zerocopy/", env!("CARGO_PKG_VERSION"), "/zerocopy/derive.TryFromBytes.html"),
1681)]
1682#[cfg_attr(
1683    not(no_zerocopy_diagnostic_on_unimplemented_1_78_0),
1684    diagnostic::on_unimplemented(note = "Consider adding `#[derive(TryFromBytes)]` to `{Self}`")
1685)]
1686pub unsafe trait TryFromBytes {
    // The `Self: Sized` bound makes it so that `TryFromBytes` is still object
    // safe.
    //
    // This `#[doc(hidden)]` method exists to discourage hand-written impls:
    // the trait-level docs require that `TryFromBytes` only be implemented
    // via `#[derive(TryFromBytes)]`.
    #[doc(hidden)]
    fn only_derive_is_allowed_to_implement_this_trait()
    where
        Self: Sized;
1693
    /// Does a given memory range contain a valid instance of `Self`?
    ///
    /// # Safety
    ///
    /// Unsafe code may assume that, if `is_bit_valid(candidate)` returns true,
    /// `*candidate` contains a valid `Self`.
    ///
    /// # Panics
    ///
    /// `is_bit_valid` may panic. Callers are responsible for ensuring that any
    /// `unsafe` code remains sound even in the face of `is_bit_valid`
    /// panicking. (We support user-defined validation routines; so long as
    /// these routines are not required to be `unsafe`, there is no way to
    /// ensure that these do not generate panics.)
    ///
    /// Besides user-defined validation routines panicking, `is_bit_valid` will
    /// either panic or fail to compile if called on a pointer with [`Shared`]
    /// aliasing when `Self: !Immutable`.
    ///
    /// [`UnsafeCell`]: core::cell::UnsafeCell
    /// [`Shared`]: invariant::Shared
    // NOTE: `#[doc(hidden)]` — this is an internal implementation detail. Per
    // the trait-level "Safety" docs, downstream code must not rely on it.
    #[doc(hidden)]
    fn is_bit_valid<A>(candidate: Maybe<'_, Self, A>) -> bool
    where
        A: invariant::Alignment;
1719
    /// Attempts to interpret the given `source` as a `&Self`.
    ///
    /// If the bytes of `source` are a valid instance of `Self`, this method
    /// returns a reference to those bytes interpreted as a `Self`. If the
    /// length of `source` is not a [valid size of `Self`][valid-size], or if
    /// `source` is not appropriately aligned, or if `source` is not a valid
    /// instance of `Self`, this returns `Err`. If [`Self:
    /// Unaligned`][self-unaligned], you can [infallibly discard the alignment
    /// error][ConvertError::from].
    ///
    /// `Self` may be a sized type, a slice, or a [slice DST][slice-dst].
    ///
    /// [valid-size]: crate::KnownLayout#what-is-a-valid-size
    /// [self-unaligned]: Unaligned
    /// [slice-dst]: KnownLayout#dynamically-sized-types
    ///
    /// # Compile-Time Assertions
    ///
    /// This method cannot yet be used on unsized types whose dynamically-sized
    /// component is zero-sized. Attempting to use this method on such types
    /// results in a compile-time assertion error; e.g.:
    ///
    /// ```compile_fail,E0080
    /// use zerocopy::*;
    /// # use zerocopy_derive::*;
    ///
    /// #[derive(TryFromBytes, Immutable, KnownLayout)]
    /// #[repr(C)]
    /// struct ZSTy {
    ///     leading_sized: u16,
    ///     trailing_dst: [()],
    /// }
    ///
    /// let _ = ZSTy::try_ref_from_bytes(0u16.as_bytes()); // ⚠ Compile Error!
    /// ```
    ///
    /// # Examples
    ///
    /// ```
    /// use zerocopy::TryFromBytes;
    /// # use zerocopy_derive::*;
    ///
    /// // The only valid value of this type is the byte `0xC0`
    /// #[derive(TryFromBytes, KnownLayout, Immutable)]
    /// #[repr(u8)]
    /// enum C0 { xC0 = 0xC0 }
    ///
    /// // The only valid value of this type is the byte sequence `0xC0C0`.
    /// #[derive(TryFromBytes, KnownLayout, Immutable)]
    /// #[repr(C)]
    /// struct C0C0(C0, C0);
    ///
    /// #[derive(TryFromBytes, KnownLayout, Immutable)]
    /// #[repr(C)]
    /// struct Packet {
    ///     magic_number: C0C0,
    ///     mug_size: u8,
    ///     temperature: u8,
    ///     marshmallows: [[u8; 2]],
    /// }
    ///
    /// let bytes = &[0xC0, 0xC0, 240, 77, 0, 1, 2, 3, 4, 5][..];
    ///
    /// let packet = Packet::try_ref_from_bytes(bytes).unwrap();
    ///
    /// assert_eq!(packet.mug_size, 240);
    /// assert_eq!(packet.temperature, 77);
    /// assert_eq!(packet.marshmallows, [[0, 1], [2, 3], [4, 5]]);
    ///
    /// // These bytes are not a valid instance of `Packet`.
    /// let bytes = &[0x10, 0xC0, 240, 77, 0, 1, 2, 3, 4, 5][..];
    /// assert!(Packet::try_ref_from_bytes(bytes).is_err());
    /// ```
    #[must_use = "has no side effects"]
    #[inline]
    fn try_ref_from_bytes(source: &[u8]) -> Result<&Self, TryCastError<&[u8], Self>>
    where
        Self: KnownLayout + Immutable,
    {
        static_assert_dst_is_not_zst!(Self);
        // First, check size and alignment by attempting a raw cast of the
        // entire byte slice. `None` = no caller-provided element count;
        // contrast with `try_ref_from_bytes_with_elems`, which passes
        // `Some(count)`.
        match Ptr::from_ref(source).try_cast_into_no_leftover::<Self, BecauseImmutable>(None) {
            Ok(source) => {
                // This call may panic. If that happens, it doesn't cause any soundness
                // issues, as we have not generated any invalid state which we need to
                // fix before returning.
                match source.try_into_valid() {
                    Ok(valid) => Ok(valid.as_ref()),
                    Err(e) => {
                        // Bit-validity check failed: surface the original byte
                        // slice in the returned error.
                        Err(e.map_src(|src| src.as_bytes::<BecauseImmutable>().as_ref()).into())
                    }
                }
            }
            // Size or alignment error: map the `Ptr` source back to `&[u8]`.
            Err(e) => Err(e.map_src(Ptr::as_ref).into()),
        }
    }
1815
    /// Attempts to interpret the prefix of the given `source` as a `&Self`.
    ///
    /// This method computes the [largest possible size of `Self`][valid-size]
    /// that can fit in the leading bytes of `source`. If that prefix is a valid
    /// instance of `Self`, this method returns a reference to those bytes
    /// interpreted as `Self`, and a reference to the remaining bytes. If there
    /// are insufficient bytes, or if `source` is not appropriately aligned, or
    /// if those bytes are not a valid instance of `Self`, this returns `Err`.
    /// If [`Self: Unaligned`][self-unaligned], you can [infallibly discard the
    /// alignment error][ConvertError::from].
    ///
    /// `Self` may be a sized type, a slice, or a [slice DST][slice-dst].
    ///
    /// [valid-size]: crate::KnownLayout#what-is-a-valid-size
    /// [self-unaligned]: Unaligned
    /// [slice-dst]: KnownLayout#dynamically-sized-types
    ///
    /// # Compile-Time Assertions
    ///
    /// This method cannot yet be used on unsized types whose dynamically-sized
    /// component is zero-sized. Attempting to use this method on such types
    /// results in a compile-time assertion error; e.g.:
    ///
    /// ```compile_fail,E0080
    /// use zerocopy::*;
    /// # use zerocopy_derive::*;
    ///
    /// #[derive(TryFromBytes, Immutable, KnownLayout)]
    /// #[repr(C)]
    /// struct ZSTy {
    ///     leading_sized: u16,
    ///     trailing_dst: [()],
    /// }
    ///
    /// let _ = ZSTy::try_ref_from_prefix(0u16.as_bytes()); // ⚠ Compile Error!
    /// ```
    ///
    /// # Examples
    ///
    /// ```
    /// use zerocopy::TryFromBytes;
    /// # use zerocopy_derive::*;
    ///
    /// // The only valid value of this type is the byte `0xC0`
    /// #[derive(TryFromBytes, KnownLayout, Immutable)]
    /// #[repr(u8)]
    /// enum C0 { xC0 = 0xC0 }
    ///
    /// // The only valid value of this type is the bytes `0xC0C0`.
    /// #[derive(TryFromBytes, KnownLayout, Immutable)]
    /// #[repr(C)]
    /// struct C0C0(C0, C0);
    ///
    /// #[derive(TryFromBytes, KnownLayout, Immutable)]
    /// #[repr(C)]
    /// struct Packet {
    ///     magic_number: C0C0,
    ///     mug_size: u8,
    ///     temperature: u8,
    ///     marshmallows: [[u8; 2]],
    /// }
    ///
    /// // These are more bytes than are needed to encode a `Packet`.
    /// let bytes = &[0xC0, 0xC0, 240, 77, 0, 1, 2, 3, 4, 5, 6][..];
    ///
    /// let (packet, suffix) = Packet::try_ref_from_prefix(bytes).unwrap();
    ///
    /// assert_eq!(packet.mug_size, 240);
    /// assert_eq!(packet.temperature, 77);
    /// assert_eq!(packet.marshmallows, [[0, 1], [2, 3], [4, 5]]);
    /// assert_eq!(suffix, &[6u8][..]);
    ///
    /// // These bytes are not a valid instance of `Packet`.
    /// let bytes = &[0x10, 0xC0, 240, 77, 0, 1, 2, 3, 4, 5, 6][..];
    /// assert!(Packet::try_ref_from_prefix(bytes).is_err());
    /// ```
    #[must_use = "has no side effects"]
    #[inline]
    fn try_ref_from_prefix(source: &[u8]) -> Result<(&Self, &[u8]), TryCastError<&[u8], Self>>
    where
        Self: KnownLayout + Immutable,
    {
        static_assert_dst_is_not_zst!(Self);
        // Delegate to the shared prefix/suffix helper. `CastType::Prefix`
        // places `Self` at the start of `source`; `None` means no explicit
        // trailing-element count (the largest fitting size is used).
        try_ref_from_prefix_suffix(source, CastType::Prefix, None)
    }
1901
    /// Attempts to interpret the suffix of the given `source` as a `&Self`.
    ///
    /// This method computes the [largest possible size of `Self`][valid-size]
    /// that can fit in the trailing bytes of `source`. If that suffix is a
    /// valid instance of `Self`, this method returns a reference to those bytes
    /// interpreted as `Self`, and a reference to the preceding bytes. If there
    /// are insufficient bytes, or if the suffix of `source` would not be
    /// appropriately aligned, or if the suffix is not a valid instance of
    /// `Self`, this returns `Err`. If [`Self: Unaligned`][self-unaligned], you
    /// can [infallibly discard the alignment error][ConvertError::from].
    ///
    /// `Self` may be a sized type, a slice, or a [slice DST][slice-dst].
    ///
    /// [valid-size]: crate::KnownLayout#what-is-a-valid-size
    /// [self-unaligned]: Unaligned
    /// [slice-dst]: KnownLayout#dynamically-sized-types
    ///
    /// # Compile-Time Assertions
    ///
    /// This method cannot yet be used on unsized types whose dynamically-sized
    /// component is zero-sized. Attempting to use this method on such types
    /// results in a compile-time assertion error; e.g.:
    ///
    /// ```compile_fail,E0080
    /// use zerocopy::*;
    /// # use zerocopy_derive::*;
    ///
    /// #[derive(TryFromBytes, Immutable, KnownLayout)]
    /// #[repr(C)]
    /// struct ZSTy {
    ///     leading_sized: u16,
    ///     trailing_dst: [()],
    /// }
    ///
    /// let _ = ZSTy::try_ref_from_suffix(0u16.as_bytes()); // ⚠ Compile Error!
    /// ```
    ///
    /// # Examples
    ///
    /// ```
    /// use zerocopy::TryFromBytes;
    /// # use zerocopy_derive::*;
    ///
    /// // The only valid value of this type is the byte `0xC0`
    /// #[derive(TryFromBytes, KnownLayout, Immutable)]
    /// #[repr(u8)]
    /// enum C0 { xC0 = 0xC0 }
    ///
    /// // The only valid value of this type is the bytes `0xC0C0`.
    /// #[derive(TryFromBytes, KnownLayout, Immutable)]
    /// #[repr(C)]
    /// struct C0C0(C0, C0);
    ///
    /// #[derive(TryFromBytes, KnownLayout, Immutable)]
    /// #[repr(C)]
    /// struct Packet {
    ///     magic_number: C0C0,
    ///     mug_size: u8,
    ///     temperature: u8,
    ///     marshmallows: [[u8; 2]],
    /// }
    ///
    /// // These are more bytes than are needed to encode a `Packet`.
    /// let bytes = &[0, 0xC0, 0xC0, 240, 77, 2, 3, 4, 5, 6, 7][..];
    ///
    /// let (prefix, packet) = Packet::try_ref_from_suffix(bytes).unwrap();
    ///
    /// assert_eq!(packet.mug_size, 240);
    /// assert_eq!(packet.temperature, 77);
    /// assert_eq!(packet.marshmallows, [[2, 3], [4, 5], [6, 7]]);
    /// assert_eq!(prefix, &[0u8][..]);
    ///
    /// // These bytes are not a valid instance of `Packet`.
    /// let bytes = &[0, 1, 2, 3, 4, 5, 6, 77, 240, 0xC0, 0x10][..];
    /// assert!(Packet::try_ref_from_suffix(bytes).is_err());
    /// ```
    #[must_use = "has no side effects"]
    #[inline]
    fn try_ref_from_suffix(source: &[u8]) -> Result<(&[u8], &Self), TryCastError<&[u8], Self>>
    where
        Self: KnownLayout + Immutable,
    {
        static_assert_dst_is_not_zst!(Self);
        // The shared helper yields `(Self, remainder)`; `swap` reorders the
        // pair to this method's `(prefix, Self)` return order.
        try_ref_from_prefix_suffix(source, CastType::Suffix, None).map(swap)
    }
1987
1988    /// Attempts to interpret the given `source` as a `&mut Self` without
1989    /// copying.
1990    ///
1991    /// If the bytes of `source` are a valid instance of `Self`, this method
1992    /// returns a reference to those bytes interpreted as a `Self`. If the
1993    /// length of `source` is not a [valid size of `Self`][valid-size], or if
1994    /// `source` is not appropriately aligned, or if `source` is not a valid
1995    /// instance of `Self`, this returns `Err`. If [`Self:
1996    /// Unaligned`][self-unaligned], you can [infallibly discard the alignment
1997    /// error][ConvertError::from].
1998    ///
1999    /// `Self` may be a sized type, a slice, or a [slice DST][slice-dst].
2000    ///
2001    /// [valid-size]: crate::KnownLayout#what-is-a-valid-size
2002    /// [self-unaligned]: Unaligned
2003    /// [slice-dst]: KnownLayout#dynamically-sized-types
2004    ///
2005    /// # Compile-Time Assertions
2006    ///
2007    /// This method cannot yet be used on unsized types whose dynamically-sized
2008    /// component is zero-sized. Attempting to use this method on such types
2009    /// results in a compile-time assertion error; e.g.:
2010    ///
2011    /// ```compile_fail,E0080
2012    /// use zerocopy::*;
2013    /// # use zerocopy_derive::*;
2014    ///
2015    /// #[derive(TryFromBytes, IntoBytes, KnownLayout)]
2016    /// #[repr(C, packed)]
2017    /// struct ZSTy {
2018    ///     leading_sized: [u8; 2],
2019    ///     trailing_dst: [()],
2020    /// }
2021    ///
2022    /// let mut source = [85, 85];
2023    /// let _ = ZSTy::try_mut_from_bytes(&mut source[..]); // âš  Compile Error!
2024    /// ```
2025    ///
2026    /// # Examples
2027    ///
2028    /// ```
2029    /// use zerocopy::TryFromBytes;
2030    /// # use zerocopy_derive::*;
2031    ///
2032    /// // The only valid value of this type is the byte `0xC0`
2033    /// #[derive(TryFromBytes, IntoBytes, KnownLayout)]
2034    /// #[repr(u8)]
2035    /// enum C0 { xC0 = 0xC0 }
2036    ///
2037    /// // The only valid value of this type is the bytes `0xC0C0`.
2038    /// #[derive(TryFromBytes, IntoBytes, KnownLayout)]
2039    /// #[repr(C)]
2040    /// struct C0C0(C0, C0);
2041    ///
2042    /// #[derive(TryFromBytes, IntoBytes, KnownLayout)]
2043    /// #[repr(C, packed)]
2044    /// struct Packet {
2045    ///     magic_number: C0C0,
2046    ///     mug_size: u8,
2047    ///     temperature: u8,
2048    ///     marshmallows: [[u8; 2]],
2049    /// }
2050    ///
2051    /// let bytes = &mut [0xC0, 0xC0, 240, 77, 0, 1, 2, 3, 4, 5][..];
2052    ///
2053    /// let packet = Packet::try_mut_from_bytes(bytes).unwrap();
2054    ///
2055    /// assert_eq!(packet.mug_size, 240);
2056    /// assert_eq!(packet.temperature, 77);
2057    /// assert_eq!(packet.marshmallows, [[0, 1], [2, 3], [4, 5]]);
2058    ///
2059    /// packet.temperature = 111;
2060    ///
2061    /// assert_eq!(bytes, [0xC0, 0xC0, 240, 111, 0, 1, 2, 3, 4, 5]);
2062    ///
2063    /// // These bytes are not valid instance of `Packet`.
2064    /// let bytes = &mut [0x10, 0xC0, 240, 77, 0, 1, 2, 3, 4, 5, 6][..];
2065    /// assert!(Packet::try_mut_from_bytes(bytes).is_err());
2066    /// ```
2067    #[must_use = "has no side effects"]
2068    #[inline]
2069    fn try_mut_from_bytes(bytes: &mut [u8]) -> Result<&mut Self, TryCastError<&mut [u8], Self>>
2070    where
2071        Self: KnownLayout + IntoBytes,
2072    {
2073        static_assert_dst_is_not_zst!(Self);
2074        match Ptr::from_mut(bytes).try_cast_into_no_leftover::<Self, BecauseExclusive>(None) {
2075            Ok(source) => {
2076                // This call may panic. If that happens, it doesn't cause any soundness
2077                // issues, as we have not generated any invalid state which we need to
2078                // fix before returning.
2079                match source.try_into_valid() {
2080                    Ok(source) => Ok(source.as_mut()),
2081                    Err(e) => Err(e.map_src(|src| src.as_bytes().as_mut()).into()),
2082                }
2083            }
2084            Err(e) => Err(e.map_src(Ptr::as_mut).into()),
2085        }
2086    }
2087
    /// Attempts to interpret the prefix of the given `source` as a `&mut
    /// Self`.
    ///
    /// This method computes the [largest possible size of `Self`][valid-size]
    /// that can fit in the leading bytes of `source`. If that prefix is a valid
    /// instance of `Self`, this method returns a reference to those bytes
    /// interpreted as `Self`, and a reference to the remaining bytes. If there
    /// are insufficient bytes, or if `source` is not appropriately aligned, or
    /// if the bytes are not a valid instance of `Self`, this returns `Err`. If
    /// [`Self: Unaligned`][self-unaligned], you can [infallibly discard the
    /// alignment error][ConvertError::from].
    ///
    /// `Self` may be a sized type, a slice, or a [slice DST][slice-dst].
    ///
    /// [valid-size]: crate::KnownLayout#what-is-a-valid-size
    /// [self-unaligned]: Unaligned
    /// [slice-dst]: KnownLayout#dynamically-sized-types
    ///
    /// # Compile-Time Assertions
    ///
    /// This method cannot yet be used on unsized types whose dynamically-sized
    /// component is zero-sized. Attempting to use this method on such types
    /// results in a compile-time assertion error; e.g.:
    ///
    /// ```compile_fail,E0080
    /// use zerocopy::*;
    /// # use zerocopy_derive::*;
    ///
    /// #[derive(TryFromBytes, IntoBytes, KnownLayout)]
    /// #[repr(C, packed)]
    /// struct ZSTy {
    ///     leading_sized: [u8; 2],
    ///     trailing_dst: [()],
    /// }
    ///
    /// let mut source = [85, 85];
    /// let _ = ZSTy::try_mut_from_prefix(&mut source[..]); // ⚠ Compile Error!
    /// ```
    ///
    /// # Examples
    ///
    /// ```
    /// use zerocopy::TryFromBytes;
    /// # use zerocopy_derive::*;
    ///
    /// // The only valid value of this type is the byte `0xC0`
    /// #[derive(TryFromBytes, IntoBytes, KnownLayout)]
    /// #[repr(u8)]
    /// enum C0 { xC0 = 0xC0 }
    ///
    /// // The only valid value of this type is the bytes `0xC0C0`.
    /// #[derive(TryFromBytes, IntoBytes, KnownLayout)]
    /// #[repr(C)]
    /// struct C0C0(C0, C0);
    ///
    /// #[derive(TryFromBytes, IntoBytes, KnownLayout)]
    /// #[repr(C, packed)]
    /// struct Packet {
    ///     magic_number: C0C0,
    ///     mug_size: u8,
    ///     temperature: u8,
    ///     marshmallows: [[u8; 2]],
    /// }
    ///
    /// // These are more bytes than are needed to encode a `Packet`.
    /// let bytes = &mut [0xC0, 0xC0, 240, 77, 0, 1, 2, 3, 4, 5, 6][..];
    ///
    /// let (packet, suffix) = Packet::try_mut_from_prefix(bytes).unwrap();
    ///
    /// assert_eq!(packet.mug_size, 240);
    /// assert_eq!(packet.temperature, 77);
    /// assert_eq!(packet.marshmallows, [[0, 1], [2, 3], [4, 5]]);
    /// assert_eq!(suffix, &[6u8][..]);
    ///
    /// packet.temperature = 111;
    /// suffix[0] = 222;
    ///
    /// assert_eq!(bytes, [0xC0, 0xC0, 240, 111, 0, 1, 2, 3, 4, 5, 222]);
    ///
    /// // These bytes are not a valid instance of `Packet`.
    /// let bytes = &mut [0x10, 0xC0, 240, 77, 0, 1, 2, 3, 4, 5, 6][..];
    /// assert!(Packet::try_mut_from_prefix(bytes).is_err());
    /// ```
    #[must_use = "has no side effects"]
    #[inline]
    fn try_mut_from_prefix(
        source: &mut [u8],
    ) -> Result<(&mut Self, &mut [u8]), TryCastError<&mut [u8], Self>>
    where
        Self: KnownLayout + IntoBytes,
    {
        static_assert_dst_is_not_zst!(Self);
        // Delegate to the shared mutable prefix/suffix helper.
        // `CastType::Prefix` places `Self` at the start of `source`; `None`
        // means no explicit trailing-element count (the largest fitting size
        // is used).
        try_mut_from_prefix_suffix(source, CastType::Prefix, None)
    }
2182
    /// Attempts to interpret the suffix of the given `source` as a `&mut
    /// Self`.
    ///
    /// This method computes the [largest possible size of `Self`][valid-size]
    /// that can fit in the trailing bytes of `source`. If that suffix is a
    /// valid instance of `Self`, this method returns a reference to those bytes
    /// interpreted as `Self`, and a reference to the preceding bytes. If there
    /// are insufficient bytes, or if the suffix of `source` would not be
    /// appropriately aligned, or if the suffix is not a valid instance of
    /// `Self`, this returns `Err`. If [`Self: Unaligned`][self-unaligned], you
    /// can [infallibly discard the alignment error][ConvertError::from].
    ///
    /// `Self` may be a sized type, a slice, or a [slice DST][slice-dst].
    ///
    /// [valid-size]: crate::KnownLayout#what-is-a-valid-size
    /// [self-unaligned]: Unaligned
    /// [slice-dst]: KnownLayout#dynamically-sized-types
    ///
    /// # Compile-Time Assertions
    ///
    /// This method cannot yet be used on unsized types whose dynamically-sized
    /// component is zero-sized. Attempting to use this method on such types
    /// results in a compile-time assertion error; e.g.:
    ///
    /// ```compile_fail,E0080
    /// use zerocopy::*;
    /// # use zerocopy_derive::*;
    ///
    /// #[derive(TryFromBytes, IntoBytes, KnownLayout)]
    /// #[repr(C, packed)]
    /// struct ZSTy {
    ///     leading_sized: u16,
    ///     trailing_dst: [()],
    /// }
    ///
    /// let mut source = [85, 85];
    /// let _ = ZSTy::try_mut_from_suffix(&mut source[..]); // ⚠ Compile Error!
    /// ```
    ///
    /// # Examples
    ///
    /// ```
    /// use zerocopy::TryFromBytes;
    /// # use zerocopy_derive::*;
    ///
    /// // The only valid value of this type is the byte `0xC0`
    /// #[derive(TryFromBytes, IntoBytes, KnownLayout)]
    /// #[repr(u8)]
    /// enum C0 { xC0 = 0xC0 }
    ///
    /// // The only valid value of this type is the bytes `0xC0C0`.
    /// #[derive(TryFromBytes, IntoBytes, KnownLayout)]
    /// #[repr(C)]
    /// struct C0C0(C0, C0);
    ///
    /// #[derive(TryFromBytes, IntoBytes, KnownLayout)]
    /// #[repr(C, packed)]
    /// struct Packet {
    ///     magic_number: C0C0,
    ///     mug_size: u8,
    ///     temperature: u8,
    ///     marshmallows: [[u8; 2]],
    /// }
    ///
    /// // These are more bytes than are needed to encode a `Packet`.
    /// let bytes = &mut [0, 0xC0, 0xC0, 240, 77, 2, 3, 4, 5, 6, 7][..];
    ///
    /// let (prefix, packet) = Packet::try_mut_from_suffix(bytes).unwrap();
    ///
    /// assert_eq!(packet.mug_size, 240);
    /// assert_eq!(packet.temperature, 77);
    /// assert_eq!(packet.marshmallows, [[2, 3], [4, 5], [6, 7]]);
    /// assert_eq!(prefix, &[0u8][..]);
    ///
    /// prefix[0] = 111;
    /// packet.temperature = 222;
    ///
    /// assert_eq!(bytes, [111, 0xC0, 0xC0, 240, 222, 2, 3, 4, 5, 6, 7]);
    ///
    /// // These bytes are not a valid instance of `Packet`.
    /// let bytes = &mut [0, 1, 2, 3, 4, 5, 6, 77, 240, 0xC0, 0x10][..];
    /// assert!(Packet::try_mut_from_suffix(bytes).is_err());
    /// ```
    #[must_use = "has no side effects"]
    #[inline]
    fn try_mut_from_suffix(
        source: &mut [u8],
    ) -> Result<(&mut [u8], &mut Self), TryCastError<&mut [u8], Self>>
    where
        Self: KnownLayout + IntoBytes,
    {
        static_assert_dst_is_not_zst!(Self);
        // The shared helper yields `(Self, remainder)`; `swap` reorders the
        // pair to this method's `(prefix, Self)` return order.
        try_mut_from_prefix_suffix(source, CastType::Suffix, None).map(swap)
    }
2277
    /// Attempts to interpret the given `source` as a `&Self` with a DST length
    /// equal to `count`.
    ///
    /// This method attempts to return a reference to `source` interpreted as a
    /// `Self` with `count` trailing elements. If the length of `source` is not
    /// equal to the size of `Self` with `count` elements, if `source` is not
    /// appropriately aligned, or if `source` does not contain a valid instance
    /// of `Self`, this returns `Err`. If [`Self: Unaligned`][self-unaligned],
    /// you can [infallibly discard the alignment error][ConvertError::from].
    ///
    /// [self-unaligned]: Unaligned
    /// [slice-dst]: KnownLayout#dynamically-sized-types
    ///
    /// # Examples
    ///
    /// ```
    /// # #![allow(non_camel_case_types)] // For C0::xC0
    /// use zerocopy::TryFromBytes;
    /// # use zerocopy_derive::*;
    ///
    /// // The only valid value of this type is the byte `0xC0`
    /// #[derive(TryFromBytes, KnownLayout, Immutable)]
    /// #[repr(u8)]
    /// enum C0 { xC0 = 0xC0 }
    ///
    /// // The only valid value of this type is the bytes `0xC0C0`.
    /// #[derive(TryFromBytes, KnownLayout, Immutable)]
    /// #[repr(C)]
    /// struct C0C0(C0, C0);
    ///
    /// #[derive(TryFromBytes, KnownLayout, Immutable)]
    /// #[repr(C)]
    /// struct Packet {
    ///     magic_number: C0C0,
    ///     mug_size: u8,
    ///     temperature: u8,
    ///     marshmallows: [[u8; 2]],
    /// }
    ///
    /// let bytes = &[0xC0, 0xC0, 240, 77, 2, 3, 4, 5, 6, 7][..];
    ///
    /// let packet = Packet::try_ref_from_bytes_with_elems(bytes, 3).unwrap();
    ///
    /// assert_eq!(packet.mug_size, 240);
    /// assert_eq!(packet.temperature, 77);
    /// assert_eq!(packet.marshmallows, [[2, 3], [4, 5], [6, 7]]);
    ///
    /// // These bytes are not a valid instance of `Packet`.
    /// let bytes = &[0, 1, 2, 3, 4, 5, 6, 77, 240, 0xC0, 0xC0][..];
    /// assert!(Packet::try_ref_from_bytes_with_elems(bytes, 3).is_err());
    /// ```
    ///
    /// Since an explicit `count` is provided, this method supports types with
    /// zero-sized trailing slice elements. Methods such as [`try_ref_from_bytes`]
    /// which do not take an explicit count do not support such types.
    ///
    /// ```
    /// use core::num::NonZeroU16;
    /// use zerocopy::*;
    /// # use zerocopy_derive::*;
    ///
    /// #[derive(TryFromBytes, Immutable, KnownLayout)]
    /// #[repr(C)]
    /// struct ZSTy {
    ///     leading_sized: NonZeroU16,
    ///     trailing_dst: [()],
    /// }
    ///
    /// let src = 0xCAFEu16.as_bytes();
    /// let zsty = ZSTy::try_ref_from_bytes_with_elems(src, 42).unwrap();
    /// assert_eq!(zsty.trailing_dst.len(), 42);
    /// ```
    ///
    /// [`try_ref_from_bytes`]: TryFromBytes::try_ref_from_bytes
    #[must_use = "has no side effects"]
    #[inline]
    fn try_ref_from_bytes_with_elems(
        source: &[u8],
        count: usize,
    ) -> Result<&Self, TryCastError<&[u8], Self>>
    where
        Self: KnownLayout<PointerMetadata = usize> + Immutable,
    {
        // Note: no `static_assert_dst_is_not_zst!` here — the explicit
        // `Some(count)` fixes the trailing-slice length up front, which is
        // what lets this method support zero-sized trailing elements (see
        // docs above).
        match Ptr::from_ref(source).try_cast_into_no_leftover::<Self, BecauseImmutable>(Some(count))
        {
            Ok(source) => {
                // This call may panic. If that happens, it doesn't cause any soundness
                // issues, as we have not generated any invalid state which we need to
                // fix before returning.
                match source.try_into_valid() {
                    Ok(source) => Ok(source.as_ref()),
                    Err(e) => {
                        // Bit-validity check failed: surface the original byte
                        // slice in the returned error.
                        Err(e.map_src(|src| src.as_bytes::<BecauseImmutable>().as_ref()).into())
                    }
                }
            }
            // Size or alignment error: map the `Ptr` source back to `&[u8]`.
            Err(e) => Err(e.map_src(Ptr::as_ref).into()),
        }
    }
2377
2378    /// Attempts to interpret the prefix of the given `source` as a `&Self` with
2379    /// a DST length equal to `count`.
2380    ///
2381    /// This method attempts to return a reference to the prefix of `source`
2382    /// interpreted as a `Self` with `count` trailing elements, and a reference
2383    /// to the remaining bytes. If the length of `source` is less than the size
2384    /// of `Self` with `count` elements, if `source` is not appropriately
2385    /// aligned, or if the prefix of `source` does not contain a valid instance
2386    /// of `Self`, this returns `Err`. If [`Self: Unaligned`][self-unaligned],
2387    /// you can [infallibly discard the alignment error][ConvertError::from].
2388    ///
2389    /// [self-unaligned]: Unaligned
2390    /// [slice-dst]: KnownLayout#dynamically-sized-types
2391    ///
2392    /// # Examples
2393    ///
2394    /// ```
2395    /// # #![allow(non_camel_case_types)] // For C0::xC0
2396    /// use zerocopy::TryFromBytes;
2397    /// # use zerocopy_derive::*;
2398    ///
2399    /// // The only valid value of this type is the byte `0xC0`
2400    /// #[derive(TryFromBytes, KnownLayout, Immutable)]
2401    /// #[repr(u8)]
2402    /// enum C0 { xC0 = 0xC0 }
2403    ///
2404    /// // The only valid value of this type is the bytes `0xC0C0`.
2405    /// #[derive(TryFromBytes, KnownLayout, Immutable)]
2406    /// #[repr(C)]
2407    /// struct C0C0(C0, C0);
2408    ///
2409    /// #[derive(TryFromBytes, KnownLayout, Immutable)]
2410    /// #[repr(C)]
2411    /// struct Packet {
2412    ///     magic_number: C0C0,
2413    ///     mug_size: u8,
2414    ///     temperature: u8,
2415    ///     marshmallows: [[u8; 2]],
2416    /// }
2417    ///
2418    /// let bytes = &[0xC0, 0xC0, 240, 77, 2, 3, 4, 5, 6, 7, 8][..];
2419    ///
2420    /// let (packet, suffix) = Packet::try_ref_from_prefix_with_elems(bytes, 3).unwrap();
2421    ///
2422    /// assert_eq!(packet.mug_size, 240);
2423    /// assert_eq!(packet.temperature, 77);
2424    /// assert_eq!(packet.marshmallows, [[2, 3], [4, 5], [6, 7]]);
2425    /// assert_eq!(suffix, &[8u8][..]);
2426    ///
    /// // These bytes are not a valid instance of `Packet`.
2428    /// let bytes = &mut [0, 1, 2, 3, 4, 5, 6, 7, 8, 77, 240, 0xC0, 0xC0][..];
2429    /// assert!(Packet::try_ref_from_prefix_with_elems(bytes, 3).is_err());
2430    /// ```
2431    ///
2432    /// Since an explicit `count` is provided, this method supports types with
2433    /// zero-sized trailing slice elements. Methods such as [`try_ref_from_prefix`]
2434    /// which do not take an explicit count do not support such types.
2435    ///
2436    /// ```
2437    /// use core::num::NonZeroU16;
2438    /// use zerocopy::*;
2439    /// # use zerocopy_derive::*;
2440    ///
2441    /// #[derive(TryFromBytes, Immutable, KnownLayout)]
2442    /// #[repr(C)]
2443    /// struct ZSTy {
2444    ///     leading_sized: NonZeroU16,
2445    ///     trailing_dst: [()],
2446    /// }
2447    ///
2448    /// let src = 0xCAFEu16.as_bytes();
2449    /// let (zsty, _) = ZSTy::try_ref_from_prefix_with_elems(src, 42).unwrap();
2450    /// assert_eq!(zsty.trailing_dst.len(), 42);
2451    /// ```
2452    ///
2453    /// [`try_ref_from_prefix`]: TryFromBytes::try_ref_from_prefix
2454    #[must_use = "has no side effects"]
2455    #[inline]
2456    fn try_ref_from_prefix_with_elems(
2457        source: &[u8],
2458        count: usize,
2459    ) -> Result<(&Self, &[u8]), TryCastError<&[u8], Self>>
2460    where
2461        Self: KnownLayout<PointerMetadata = usize> + Immutable,
2462    {
2463        try_ref_from_prefix_suffix(source, CastType::Prefix, Some(count))
2464    }
2465
2466    /// Attempts to interpret the suffix of the given `source` as a `&Self` with
2467    /// a DST length equal to `count`.
2468    ///
2469    /// This method attempts to return a reference to the suffix of `source`
2470    /// interpreted as a `Self` with `count` trailing elements, and a reference
2471    /// to the preceding bytes. If the length of `source` is less than the size
2472    /// of `Self` with `count` elements, if the suffix of `source` is not
2473    /// appropriately aligned, or if the suffix of `source` does not contain a
2474    /// valid instance of `Self`, this returns `Err`. If [`Self:
2475    /// Unaligned`][self-unaligned], you can [infallibly discard the alignment
2476    /// error][ConvertError::from].
2477    ///
2478    /// [self-unaligned]: Unaligned
2479    /// [slice-dst]: KnownLayout#dynamically-sized-types
2480    ///
2481    /// # Examples
2482    ///
2483    /// ```
2484    /// # #![allow(non_camel_case_types)] // For C0::xC0
2485    /// use zerocopy::TryFromBytes;
2486    /// # use zerocopy_derive::*;
2487    ///
2488    /// // The only valid value of this type is the byte `0xC0`
2489    /// #[derive(TryFromBytes, KnownLayout, Immutable)]
2490    /// #[repr(u8)]
2491    /// enum C0 { xC0 = 0xC0 }
2492    ///
2493    /// // The only valid value of this type is the bytes `0xC0C0`.
2494    /// #[derive(TryFromBytes, KnownLayout, Immutable)]
2495    /// #[repr(C)]
2496    /// struct C0C0(C0, C0);
2497    ///
2498    /// #[derive(TryFromBytes, KnownLayout, Immutable)]
2499    /// #[repr(C)]
2500    /// struct Packet {
2501    ///     magic_number: C0C0,
2502    ///     mug_size: u8,
2503    ///     temperature: u8,
2504    ///     marshmallows: [[u8; 2]],
2505    /// }
2506    ///
2507    /// let bytes = &[123, 0xC0, 0xC0, 240, 77, 2, 3, 4, 5, 6, 7][..];
2508    ///
2509    /// let (prefix, packet) = Packet::try_ref_from_suffix_with_elems(bytes, 3).unwrap();
2510    ///
2511    /// assert_eq!(packet.mug_size, 240);
2512    /// assert_eq!(packet.temperature, 77);
2513    /// assert_eq!(packet.marshmallows, [[2, 3], [4, 5], [6, 7]]);
2514    /// assert_eq!(prefix, &[123u8][..]);
2515    ///
    /// // These bytes are not a valid instance of `Packet`.
2517    /// let bytes = &[0, 1, 2, 3, 4, 5, 6, 7, 8, 77, 240, 0xC0, 0xC0][..];
2518    /// assert!(Packet::try_ref_from_suffix_with_elems(bytes, 3).is_err());
2519    /// ```
2520    ///
2521    /// Since an explicit `count` is provided, this method supports types with
2522    /// zero-sized trailing slice elements. Methods such as [`try_ref_from_prefix`]
2523    /// which do not take an explicit count do not support such types.
2524    ///
2525    /// ```
2526    /// use core::num::NonZeroU16;
2527    /// use zerocopy::*;
2528    /// # use zerocopy_derive::*;
2529    ///
2530    /// #[derive(TryFromBytes, Immutable, KnownLayout)]
2531    /// #[repr(C)]
2532    /// struct ZSTy {
2533    ///     leading_sized: NonZeroU16,
2534    ///     trailing_dst: [()],
2535    /// }
2536    ///
2537    /// let src = 0xCAFEu16.as_bytes();
2538    /// let (_, zsty) = ZSTy::try_ref_from_suffix_with_elems(src, 42).unwrap();
2539    /// assert_eq!(zsty.trailing_dst.len(), 42);
2540    /// ```
2541    ///
2542    /// [`try_ref_from_prefix`]: TryFromBytes::try_ref_from_prefix
2543    #[must_use = "has no side effects"]
2544    #[inline]
2545    fn try_ref_from_suffix_with_elems(
2546        source: &[u8],
2547        count: usize,
2548    ) -> Result<(&[u8], &Self), TryCastError<&[u8], Self>>
2549    where
2550        Self: KnownLayout<PointerMetadata = usize> + Immutable,
2551    {
2552        try_ref_from_prefix_suffix(source, CastType::Suffix, Some(count)).map(swap)
2553    }
2554
2555    /// Attempts to interpret the given `source` as a `&mut Self` with a DST
2556    /// length equal to `count`.
2557    ///
2558    /// This method attempts to return a reference to `source` interpreted as a
2559    /// `Self` with `count` trailing elements. If the length of `source` is not
2560    /// equal to the size of `Self` with `count` elements, if `source` is not
2561    /// appropriately aligned, or if `source` does not contain a valid instance
2562    /// of `Self`, this returns `Err`. If [`Self: Unaligned`][self-unaligned],
2563    /// you can [infallibly discard the alignment error][ConvertError::from].
2564    ///
2565    /// [self-unaligned]: Unaligned
2566    /// [slice-dst]: KnownLayout#dynamically-sized-types
2567    ///
2568    /// # Examples
2569    ///
2570    /// ```
2571    /// # #![allow(non_camel_case_types)] // For C0::xC0
2572    /// use zerocopy::TryFromBytes;
2573    /// # use zerocopy_derive::*;
2574    ///
2575    /// // The only valid value of this type is the byte `0xC0`
2576    /// #[derive(TryFromBytes, IntoBytes, KnownLayout)]
2577    /// #[repr(u8)]
2578    /// enum C0 { xC0 = 0xC0 }
2579    ///
2580    /// // The only valid value of this type is the bytes `0xC0C0`.
2581    /// #[derive(TryFromBytes, IntoBytes, KnownLayout)]
2582    /// #[repr(C)]
2583    /// struct C0C0(C0, C0);
2584    ///
2585    /// #[derive(TryFromBytes, IntoBytes, KnownLayout)]
2586    /// #[repr(C, packed)]
2587    /// struct Packet {
2588    ///     magic_number: C0C0,
2589    ///     mug_size: u8,
2590    ///     temperature: u8,
2591    ///     marshmallows: [[u8; 2]],
2592    /// }
2593    ///
2594    /// let bytes = &mut [0xC0, 0xC0, 240, 77, 2, 3, 4, 5, 6, 7][..];
2595    ///
2596    /// let packet = Packet::try_mut_from_bytes_with_elems(bytes, 3).unwrap();
2597    ///
2598    /// assert_eq!(packet.mug_size, 240);
2599    /// assert_eq!(packet.temperature, 77);
2600    /// assert_eq!(packet.marshmallows, [[2, 3], [4, 5], [6, 7]]);
2601    ///
2602    /// packet.temperature = 111;
2603    ///
2604    /// assert_eq!(bytes, [0xC0, 0xC0, 240, 111, 2, 3, 4, 5, 6, 7]);
2605    ///
    /// // These bytes are not a valid instance of `Packet`.
2607    /// let bytes = &mut [0, 1, 2, 3, 4, 5, 6, 77, 240, 0xC0, 0xC0][..];
2608    /// assert!(Packet::try_mut_from_bytes_with_elems(bytes, 3).is_err());
2609    /// ```
2610    ///
2611    /// Since an explicit `count` is provided, this method supports types with
2612    /// zero-sized trailing slice elements. Methods such as [`try_mut_from_bytes`]
2613    /// which do not take an explicit count do not support such types.
2614    ///
2615    /// ```
2616    /// use core::num::NonZeroU16;
2617    /// use zerocopy::*;
2618    /// # use zerocopy_derive::*;
2619    ///
2620    /// #[derive(TryFromBytes, IntoBytes, KnownLayout)]
2621    /// #[repr(C, packed)]
2622    /// struct ZSTy {
2623    ///     leading_sized: NonZeroU16,
2624    ///     trailing_dst: [()],
2625    /// }
2626    ///
2627    /// let mut src = 0xCAFEu16;
2628    /// let src = src.as_mut_bytes();
2629    /// let zsty = ZSTy::try_mut_from_bytes_with_elems(src, 42).unwrap();
2630    /// assert_eq!(zsty.trailing_dst.len(), 42);
2631    /// ```
2632    ///
2633    /// [`try_mut_from_bytes`]: TryFromBytes::try_mut_from_bytes
    #[must_use = "has no side effects"]
    #[inline]
    fn try_mut_from_bytes_with_elems(
        source: &mut [u8],
        count: usize,
    ) -> Result<&mut Self, TryCastError<&mut [u8], Self>>
    where
        Self: KnownLayout<PointerMetadata = usize> + IntoBytes,
    {
        // Attempt a size- and alignment-checked cast of the whole byte slice
        // into a `Self` with `count` trailing slice elements; `no_leftover`
        // requires that `source` be consumed exactly.
        match Ptr::from_mut(source).try_cast_into_no_leftover::<Self, BecauseExclusive>(Some(count))
        {
            Ok(source) => {
                // Check bit validity. This call may panic. If that happens, it
                // doesn't cause any soundness issues, as we have not generated
                // any invalid state which we need to fix before returning.
                match source.try_into_valid() {
                    Ok(source) => Ok(source.as_mut()),
                    // Validity failure: map the error's source back to the
                    // original byte slice before returning.
                    Err(e) => Err(e.map_src(|src| src.as_bytes().as_mut()).into()),
                }
            }
            // Size or alignment failure: map the error's source back to the
            // original byte slice.
            Err(e) => Err(e.map_src(Ptr::as_mut).into()),
        }
    }
2657
2658    /// Attempts to interpret the prefix of the given `source` as a `&mut Self`
2659    /// with a DST length equal to `count`.
2660    ///
2661    /// This method attempts to return a reference to the prefix of `source`
2662    /// interpreted as a `Self` with `count` trailing elements, and a reference
2663    /// to the remaining bytes. If the length of `source` is less than the size
2664    /// of `Self` with `count` elements, if `source` is not appropriately
2665    /// aligned, or if the prefix of `source` does not contain a valid instance
2666    /// of `Self`, this returns `Err`. If [`Self: Unaligned`][self-unaligned],
2667    /// you can [infallibly discard the alignment error][ConvertError::from].
2668    ///
2669    /// [self-unaligned]: Unaligned
2670    /// [slice-dst]: KnownLayout#dynamically-sized-types
2671    ///
2672    /// # Examples
2673    ///
2674    /// ```
2675    /// # #![allow(non_camel_case_types)] // For C0::xC0
2676    /// use zerocopy::TryFromBytes;
2677    /// # use zerocopy_derive::*;
2678    ///
2679    /// // The only valid value of this type is the byte `0xC0`
2680    /// #[derive(TryFromBytes, IntoBytes, KnownLayout)]
2681    /// #[repr(u8)]
2682    /// enum C0 { xC0 = 0xC0 }
2683    ///
2684    /// // The only valid value of this type is the bytes `0xC0C0`.
2685    /// #[derive(TryFromBytes, IntoBytes, KnownLayout)]
2686    /// #[repr(C)]
2687    /// struct C0C0(C0, C0);
2688    ///
2689    /// #[derive(TryFromBytes, IntoBytes, KnownLayout)]
2690    /// #[repr(C, packed)]
2691    /// struct Packet {
2692    ///     magic_number: C0C0,
2693    ///     mug_size: u8,
2694    ///     temperature: u8,
2695    ///     marshmallows: [[u8; 2]],
2696    /// }
2697    ///
2698    /// let bytes = &mut [0xC0, 0xC0, 240, 77, 2, 3, 4, 5, 6, 7, 8][..];
2699    ///
2700    /// let (packet, suffix) = Packet::try_mut_from_prefix_with_elems(bytes, 3).unwrap();
2701    ///
2702    /// assert_eq!(packet.mug_size, 240);
2703    /// assert_eq!(packet.temperature, 77);
2704    /// assert_eq!(packet.marshmallows, [[2, 3], [4, 5], [6, 7]]);
2705    /// assert_eq!(suffix, &[8u8][..]);
2706    ///
2707    /// packet.temperature = 111;
2708    /// suffix[0] = 222;
2709    ///
2710    /// assert_eq!(bytes, [0xC0, 0xC0, 240, 111, 2, 3, 4, 5, 6, 7, 222]);
2711    ///
    /// // These bytes are not a valid instance of `Packet`.
2713    /// let bytes = &mut [0, 1, 2, 3, 4, 5, 6, 7, 8, 77, 240, 0xC0, 0xC0][..];
2714    /// assert!(Packet::try_mut_from_prefix_with_elems(bytes, 3).is_err());
2715    /// ```
2716    ///
2717    /// Since an explicit `count` is provided, this method supports types with
2718    /// zero-sized trailing slice elements. Methods such as [`try_mut_from_prefix`]
2719    /// which do not take an explicit count do not support such types.
2720    ///
2721    /// ```
2722    /// use core::num::NonZeroU16;
2723    /// use zerocopy::*;
2724    /// # use zerocopy_derive::*;
2725    ///
2726    /// #[derive(TryFromBytes, IntoBytes, KnownLayout)]
2727    /// #[repr(C, packed)]
2728    /// struct ZSTy {
2729    ///     leading_sized: NonZeroU16,
2730    ///     trailing_dst: [()],
2731    /// }
2732    ///
2733    /// let mut src = 0xCAFEu16;
2734    /// let src = src.as_mut_bytes();
2735    /// let (zsty, _) = ZSTy::try_mut_from_prefix_with_elems(src, 42).unwrap();
2736    /// assert_eq!(zsty.trailing_dst.len(), 42);
2737    /// ```
2738    ///
2739    /// [`try_mut_from_prefix`]: TryFromBytes::try_mut_from_prefix
2740    #[must_use = "has no side effects"]
2741    #[inline]
2742    fn try_mut_from_prefix_with_elems(
2743        source: &mut [u8],
2744        count: usize,
2745    ) -> Result<(&mut Self, &mut [u8]), TryCastError<&mut [u8], Self>>
2746    where
2747        Self: KnownLayout<PointerMetadata = usize> + IntoBytes,
2748    {
2749        try_mut_from_prefix_suffix(source, CastType::Prefix, Some(count))
2750    }
2751
2752    /// Attempts to interpret the suffix of the given `source` as a `&mut Self`
2753    /// with a DST length equal to `count`.
2754    ///
2755    /// This method attempts to return a reference to the suffix of `source`
2756    /// interpreted as a `Self` with `count` trailing elements, and a reference
2757    /// to the preceding bytes. If the length of `source` is less than the size
2758    /// of `Self` with `count` elements, if the suffix of `source` is not
2759    /// appropriately aligned, or if the suffix of `source` does not contain a
2760    /// valid instance of `Self`, this returns `Err`. If [`Self:
2761    /// Unaligned`][self-unaligned], you can [infallibly discard the alignment
2762    /// error][ConvertError::from].
2763    ///
2764    /// [self-unaligned]: Unaligned
2765    /// [slice-dst]: KnownLayout#dynamically-sized-types
2766    ///
2767    /// # Examples
2768    ///
2769    /// ```
2770    /// # #![allow(non_camel_case_types)] // For C0::xC0
2771    /// use zerocopy::TryFromBytes;
2772    /// # use zerocopy_derive::*;
2773    ///
2774    /// // The only valid value of this type is the byte `0xC0`
2775    /// #[derive(TryFromBytes, IntoBytes, KnownLayout)]
2776    /// #[repr(u8)]
2777    /// enum C0 { xC0 = 0xC0 }
2778    ///
2779    /// // The only valid value of this type is the bytes `0xC0C0`.
2780    /// #[derive(TryFromBytes, IntoBytes, KnownLayout)]
2781    /// #[repr(C)]
2782    /// struct C0C0(C0, C0);
2783    ///
2784    /// #[derive(TryFromBytes, IntoBytes, KnownLayout)]
2785    /// #[repr(C, packed)]
2786    /// struct Packet {
2787    ///     magic_number: C0C0,
2788    ///     mug_size: u8,
2789    ///     temperature: u8,
2790    ///     marshmallows: [[u8; 2]],
2791    /// }
2792    ///
2793    /// let bytes = &mut [123, 0xC0, 0xC0, 240, 77, 2, 3, 4, 5, 6, 7][..];
2794    ///
2795    /// let (prefix, packet) = Packet::try_mut_from_suffix_with_elems(bytes, 3).unwrap();
2796    ///
2797    /// assert_eq!(packet.mug_size, 240);
2798    /// assert_eq!(packet.temperature, 77);
2799    /// assert_eq!(packet.marshmallows, [[2, 3], [4, 5], [6, 7]]);
2800    /// assert_eq!(prefix, &[123u8][..]);
2801    ///
2802    /// prefix[0] = 111;
2803    /// packet.temperature = 222;
2804    ///
2805    /// assert_eq!(bytes, [111, 0xC0, 0xC0, 240, 222, 2, 3, 4, 5, 6, 7]);
2806    ///
    /// // These bytes are not a valid instance of `Packet`.
2808    /// let bytes = &mut [0, 1, 2, 3, 4, 5, 6, 7, 8, 77, 240, 0xC0, 0xC0][..];
2809    /// assert!(Packet::try_mut_from_suffix_with_elems(bytes, 3).is_err());
2810    /// ```
2811    ///
2812    /// Since an explicit `count` is provided, this method supports types with
2813    /// zero-sized trailing slice elements. Methods such as [`try_mut_from_prefix`]
2814    /// which do not take an explicit count do not support such types.
2815    ///
2816    /// ```
2817    /// use core::num::NonZeroU16;
2818    /// use zerocopy::*;
2819    /// # use zerocopy_derive::*;
2820    ///
2821    /// #[derive(TryFromBytes, IntoBytes, KnownLayout)]
2822    /// #[repr(C, packed)]
2823    /// struct ZSTy {
2824    ///     leading_sized: NonZeroU16,
2825    ///     trailing_dst: [()],
2826    /// }
2827    ///
2828    /// let mut src = 0xCAFEu16;
2829    /// let src = src.as_mut_bytes();
2830    /// let (_, zsty) = ZSTy::try_mut_from_suffix_with_elems(src, 42).unwrap();
2831    /// assert_eq!(zsty.trailing_dst.len(), 42);
2832    /// ```
2833    ///
2834    /// [`try_mut_from_prefix`]: TryFromBytes::try_mut_from_prefix
2835    #[must_use = "has no side effects"]
2836    #[inline]
2837    fn try_mut_from_suffix_with_elems(
2838        source: &mut [u8],
2839        count: usize,
2840    ) -> Result<(&mut [u8], &mut Self), TryCastError<&mut [u8], Self>>
2841    where
2842        Self: KnownLayout<PointerMetadata = usize> + IntoBytes,
2843    {
2844        try_mut_from_prefix_suffix(source, CastType::Suffix, Some(count)).map(swap)
2845    }
2846
2847    /// Attempts to read the given `source` as a `Self`.
2848    ///
2849    /// If `source.len() != size_of::<Self>()` or the bytes are not a valid
2850    /// instance of `Self`, this returns `Err`.
2851    ///
2852    /// # Examples
2853    ///
2854    /// ```
2855    /// use zerocopy::TryFromBytes;
2856    /// # use zerocopy_derive::*;
2857    ///
2858    /// // The only valid value of this type is the byte `0xC0`
2859    /// #[derive(TryFromBytes)]
2860    /// #[repr(u8)]
2861    /// enum C0 { xC0 = 0xC0 }
2862    ///
2863    /// // The only valid value of this type is the bytes `0xC0C0`.
2864    /// #[derive(TryFromBytes)]
2865    /// #[repr(C)]
2866    /// struct C0C0(C0, C0);
2867    ///
2868    /// #[derive(TryFromBytes)]
2869    /// #[repr(C)]
2870    /// struct Packet {
2871    ///     magic_number: C0C0,
2872    ///     mug_size: u8,
2873    ///     temperature: u8,
2874    /// }
2875    ///
2876    /// let bytes = &[0xC0, 0xC0, 240, 77][..];
2877    ///
2878    /// let packet = Packet::try_read_from_bytes(bytes).unwrap();
2879    ///
2880    /// assert_eq!(packet.mug_size, 240);
2881    /// assert_eq!(packet.temperature, 77);
2882    ///
    /// // These bytes are not a valid instance of `Packet`.
2884    /// let bytes = &mut [0x10, 0xC0, 240, 77][..];
2885    /// assert!(Packet::try_read_from_bytes(bytes).is_err());
2886    /// ```
2887    ///
2888    /// # Performance Considerations
2889    ///
2890    /// In this version of zerocopy, this method reads the `source` into a
2891    /// well-aligned stack allocation and *then* validates that the allocation
2892    /// is a valid `Self`. This ensures that validation can be performed using
2893    /// aligned reads (which carry a performance advantage over unaligned reads
2894    /// on many platforms) at the cost of an unconditional copy.
    #[must_use = "has no side effects"]
    #[inline]
    fn try_read_from_bytes(source: &[u8]) -> Result<Self, TryReadError<&[u8], Self>>
    where
        Self: Sized,
    {
        // FIXME(#2981): If `align_of::<Self>() == 1`, validate `source` in-place.

        // Copy `source` into a well-aligned stack allocation; a length
        // mismatch yields a size error.
        let candidate = match CoreMaybeUninit::<Self>::read_from_bytes(source) {
            Ok(candidate) => candidate,
            Err(e) => {
                return Err(TryReadError::Size(e.with_dst()));
            }
        };
        // SAFETY: `candidate` was copied from `source: &[u8]`, so all of its
        // bytes are initialized.
        unsafe { try_read_from(source, candidate) }
    }
2913
2914    /// Attempts to read a `Self` from the prefix of the given `source`.
2915    ///
2916    /// This attempts to read a `Self` from the first `size_of::<Self>()` bytes
2917    /// of `source`, returning that `Self` and any remaining bytes. If
2918    /// `source.len() < size_of::<Self>()` or the bytes are not a valid instance
2919    /// of `Self`, it returns `Err`.
2920    ///
2921    /// # Examples
2922    ///
2923    /// ```
2924    /// use zerocopy::TryFromBytes;
2925    /// # use zerocopy_derive::*;
2926    ///
2927    /// // The only valid value of this type is the byte `0xC0`
2928    /// #[derive(TryFromBytes)]
2929    /// #[repr(u8)]
2930    /// enum C0 { xC0 = 0xC0 }
2931    ///
2932    /// // The only valid value of this type is the bytes `0xC0C0`.
2933    /// #[derive(TryFromBytes)]
2934    /// #[repr(C)]
2935    /// struct C0C0(C0, C0);
2936    ///
2937    /// #[derive(TryFromBytes)]
2938    /// #[repr(C)]
2939    /// struct Packet {
2940    ///     magic_number: C0C0,
2941    ///     mug_size: u8,
2942    ///     temperature: u8,
2943    /// }
2944    ///
2945    /// // These are more bytes than are needed to encode a `Packet`.
2946    /// let bytes = &[0xC0, 0xC0, 240, 77, 0, 1, 2, 3, 4, 5, 6][..];
2947    ///
2948    /// let (packet, suffix) = Packet::try_read_from_prefix(bytes).unwrap();
2949    ///
2950    /// assert_eq!(packet.mug_size, 240);
2951    /// assert_eq!(packet.temperature, 77);
2952    /// assert_eq!(suffix, &[0u8, 1, 2, 3, 4, 5, 6][..]);
2953    ///
    /// // These bytes are not a valid instance of `Packet`.
2955    /// let bytes = &[0x10, 0xC0, 240, 77, 0, 1, 2, 3, 4, 5, 6][..];
2956    /// assert!(Packet::try_read_from_prefix(bytes).is_err());
2957    /// ```
2958    ///
2959    /// # Performance Considerations
2960    ///
2961    /// In this version of zerocopy, this method reads the `source` into a
2962    /// well-aligned stack allocation and *then* validates that the allocation
2963    /// is a valid `Self`. This ensures that validation can be performed using
2964    /// aligned reads (which carry a performance advantage over unaligned reads
2965    /// on many platforms) at the cost of an unconditional copy.
    #[must_use = "has no side effects"]
    #[inline]
    fn try_read_from_prefix(source: &[u8]) -> Result<(Self, &[u8]), TryReadError<&[u8], Self>>
    where
        Self: Sized,
    {
        // FIXME(#2981): If `align_of::<Self>() == 1`, validate `source` in-place.

        // Copy the leading `size_of::<Self>()` bytes into a well-aligned stack
        // allocation; a too-short `source` yields a size error.
        let (candidate, suffix) = match CoreMaybeUninit::<Self>::read_from_prefix(source) {
            Ok(candidate) => candidate,
            Err(e) => {
                return Err(TryReadError::Size(e.with_dst()));
            }
        };
        // SAFETY: `candidate` was copied from `source: &[u8]`, so all of its
        // bytes are initialized.
        unsafe { try_read_from(source, candidate).map(|slf| (slf, suffix)) }
    }
2984
2985    /// Attempts to read a `Self` from the suffix of the given `source`.
2986    ///
2987    /// This attempts to read a `Self` from the last `size_of::<Self>()` bytes
2988    /// of `source`, returning that `Self` and any preceding bytes. If
2989    /// `source.len() < size_of::<Self>()` or the bytes are not a valid instance
2990    /// of `Self`, it returns `Err`.
2991    ///
2992    /// # Examples
2993    ///
2994    /// ```
2995    /// # #![allow(non_camel_case_types)] // For C0::xC0
2996    /// use zerocopy::TryFromBytes;
2997    /// # use zerocopy_derive::*;
2998    ///
2999    /// // The only valid value of this type is the byte `0xC0`
3000    /// #[derive(TryFromBytes)]
3001    /// #[repr(u8)]
3002    /// enum C0 { xC0 = 0xC0 }
3003    ///
3004    /// // The only valid value of this type is the bytes `0xC0C0`.
3005    /// #[derive(TryFromBytes)]
3006    /// #[repr(C)]
3007    /// struct C0C0(C0, C0);
3008    ///
3009    /// #[derive(TryFromBytes)]
3010    /// #[repr(C)]
3011    /// struct Packet {
3012    ///     magic_number: C0C0,
3013    ///     mug_size: u8,
3014    ///     temperature: u8,
3015    /// }
3016    ///
3017    /// // These are more bytes than are needed to encode a `Packet`.
3018    /// let bytes = &[0, 1, 2, 3, 4, 5, 0xC0, 0xC0, 240, 77][..];
3019    ///
3020    /// let (prefix, packet) = Packet::try_read_from_suffix(bytes).unwrap();
3021    ///
3022    /// assert_eq!(packet.mug_size, 240);
3023    /// assert_eq!(packet.temperature, 77);
3024    /// assert_eq!(prefix, &[0u8, 1, 2, 3, 4, 5][..]);
3025    ///
    /// // These bytes are not a valid instance of `Packet`.
3027    /// let bytes = &[0, 1, 2, 3, 4, 5, 0x10, 0xC0, 240, 77][..];
3028    /// assert!(Packet::try_read_from_suffix(bytes).is_err());
3029    /// ```
3030    ///
3031    /// # Performance Considerations
3032    ///
3033    /// In this version of zerocopy, this method reads the `source` into a
3034    /// well-aligned stack allocation and *then* validates that the allocation
3035    /// is a valid `Self`. This ensures that validation can be performed using
3036    /// aligned reads (which carry a performance advantage over unaligned reads
3037    /// on many platforms) at the cost of an unconditional copy.
    #[must_use = "has no side effects"]
    #[inline]
    fn try_read_from_suffix(source: &[u8]) -> Result<(&[u8], Self), TryReadError<&[u8], Self>>
    where
        Self: Sized,
    {
        // FIXME(#2981): If `align_of::<Self>() == 1`, validate `source` in-place.

        // Copy the trailing `size_of::<Self>()` bytes into a well-aligned
        // stack allocation; a too-short `source` yields a size error.
        let (prefix, candidate) = match CoreMaybeUninit::<Self>::read_from_suffix(source) {
            Ok(candidate) => candidate,
            Err(e) => {
                return Err(TryReadError::Size(e.with_dst()));
            }
        };
        // SAFETY: `candidate` was copied from `source: &[u8]`, so all of its
        // bytes are initialized.
        unsafe { try_read_from(source, candidate).map(|slf| (prefix, slf)) }
    }
3056}
3057
3058#[inline(always)]
3059fn try_ref_from_prefix_suffix<T: TryFromBytes + KnownLayout + Immutable + ?Sized>(
3060    source: &[u8],
3061    cast_type: CastType,
3062    meta: Option<T::PointerMetadata>,
3063) -> Result<(&T, &[u8]), TryCastError<&[u8], T>> {
3064    match Ptr::from_ref(source).try_cast_into::<T, BecauseImmutable>(cast_type, meta) {
3065        Ok((source, prefix_suffix)) => {
3066            // This call may panic. If that happens, it doesn't cause any soundness
3067            // issues, as we have not generated any invalid state which we need to
3068            // fix before returning.
3069            match source.try_into_valid() {
3070                Ok(valid) => Ok((valid.as_ref(), prefix_suffix.as_ref())),
3071                Err(e) => Err(e.map_src(|src| src.as_bytes::<BecauseImmutable>().as_ref()).into()),
3072            }
3073        }
3074        Err(e) => Err(e.map_src(Ptr::as_ref).into()),
3075    }
3076}
3077
3078#[inline(always)]
3079fn try_mut_from_prefix_suffix<T: IntoBytes + TryFromBytes + KnownLayout + ?Sized>(
3080    candidate: &mut [u8],
3081    cast_type: CastType,
3082    meta: Option<T::PointerMetadata>,
3083) -> Result<(&mut T, &mut [u8]), TryCastError<&mut [u8], T>> {
3084    match Ptr::from_mut(candidate).try_cast_into::<T, BecauseExclusive>(cast_type, meta) {
3085        Ok((candidate, prefix_suffix)) => {
3086            // This call may panic. If that happens, it doesn't cause any soundness
3087            // issues, as we have not generated any invalid state which we need to
3088            // fix before returning.
3089            match candidate.try_into_valid() {
3090                Ok(valid) => Ok((valid.as_mut(), prefix_suffix.as_mut())),
3091                Err(e) => Err(e.map_src(|src| src.as_bytes().as_mut()).into()),
3092            }
3093        }
3094        Err(e) => Err(e.map_src(Ptr::as_mut).into()),
3095    }
3096}
3097
/// Reverses the order of a two-element tuple.
#[inline(always)]
fn swap<T, U>(pair: (T, U)) -> (U, T) {
    let (first, second) = pair;
    (second, first)
}
3102
/// Validates that `candidate` holds a bit-valid `T` and, if so, unwraps it.
///
/// On validity failure, returns `source` to the caller inside the error.
///
/// # Safety
///
/// All bytes of `candidate` must be initialized.
#[inline(always)]
unsafe fn try_read_from<S, T: TryFromBytes>(
    source: S,
    mut candidate: CoreMaybeUninit<T>,
) -> Result<T, TryReadError<S, T>> {
    // We use `from_mut` despite not mutating via `c_ptr` so that we don't need
    // to add a `T: Immutable` bound.
    let c_ptr = Ptr::from_mut(&mut candidate);
    // SAFETY: `c_ptr` has no uninitialized sub-ranges because it derived from
    // `candidate`, which the caller promises is entirely initialized. Since
    // `candidate` is a `MaybeUninit`, it has no validity requirements, and so
    // no values written to an `Initialized` `c_ptr` can violate its validity.
    // Since `c_ptr` has `Exclusive` aliasing, no mutations may happen except
    // via `c_ptr` so long as it is live, so we don't need to worry about the
    // fact that `c_ptr` may have more restricted validity than `candidate`.
    let c_ptr = unsafe { c_ptr.assume_validity::<invariant::Initialized>() };
    let mut c_ptr = c_ptr.cast::<_, crate::pointer::cast::CastSized, _>();

    // Since we don't have `T: KnownLayout`, we hack around that by using
    // `Wrapping<T>`, which implements `KnownLayout` even if `T` doesn't.
    //
    // This call may panic. If that happens, it doesn't cause any soundness
    // issues, as we have not generated any invalid state which we need to fix
    // before returning.
    if !Wrapping::<T>::is_bit_valid(c_ptr.reborrow_shared().forget_aligned()) {
        // Validity failure: hand `source` back to the caller inside the error.
        return Err(ValidityError::new(source).into());
    }

    // Compile-time assertion that `T` and `Wrapping<T>` are mutually
    // transmutable, i.e. have the same size and bit validity, which justifies
    // the `assume_init` below.
    fn _assert_same_size_and_validity<T>()
    where
        Wrapping<T>: pointer::TransmuteFrom<T, invariant::Valid, invariant::Valid>,
        T: pointer::TransmuteFrom<Wrapping<T>, invariant::Valid, invariant::Valid>,
    {
    }

    _assert_same_size_and_validity::<T>();

    // SAFETY: We just validated that `candidate` contains a valid
    // `Wrapping<T>`, which has the same size and bit validity as `T`, as
    // guaranteed by the preceding type assertion.
    Ok(unsafe { candidate.assume_init() })
}
3148
/// Types for which a sequence of `0` bytes is a valid instance.
///
/// Any memory region of the appropriate length which is guaranteed to contain
/// only zero bytes can be viewed as any `FromZeros` type with no runtime
/// overhead. This is useful whenever memory is known to be in a zeroed state,
/// such as memory returned from some allocation routines.
///
/// # Warning: Padding bytes
///
/// Note that, when a value is moved or copied, only the non-padding bytes of
/// that value are guaranteed to be preserved. It is unsound to assume that
/// values written to padding bytes are preserved after a move or copy. For more
/// details, see the [`FromBytes` docs][frombytes-warning-padding-bytes].
///
/// [frombytes-warning-padding-bytes]: FromBytes#warning-padding-bytes
///
/// # Implementation
///
/// **Do not implement this trait yourself!** Instead, use
/// [`#[derive(FromZeros)]`][derive]; e.g.:
///
/// ```
/// # use zerocopy_derive::{FromZeros, Immutable};
/// #[derive(FromZeros)]
/// struct MyStruct {
/// # /*
///     ...
/// # */
/// }
///
/// #[derive(FromZeros)]
/// #[repr(u8)]
/// enum MyEnum {
/// #   Variant0,
/// # /*
///     ...
/// # */
/// }
///
/// #[derive(FromZeros, Immutable)]
/// union MyUnion {
/// #   variant: u8,
/// # /*
///     ...
/// # */
/// }
/// ```
///
/// This derive performs a sophisticated, compile-time safety analysis to
/// determine whether a type is `FromZeros`.
///
/// # Safety
///
/// *This section describes what is required in order for `T: FromZeros`, and
/// what unsafe code may assume of such types. If you don't plan on implementing
/// `FromZeros` manually, and you don't plan on writing unsafe code that
/// operates on `FromZeros` types, then you don't need to read this section.*
///
/// If `T: FromZeros`, then unsafe code may assume that it is sound to produce a
/// `T` whose bytes are all initialized to zero. If a type is marked as
/// `FromZeros` which violates this contract, it may cause undefined behavior.
///
/// `#[derive(FromZeros)]` only permits [types which satisfy these
/// requirements][derive-analysis].
///
#[cfg_attr(
    feature = "derive",
    doc = "[derive]: zerocopy_derive::FromZeros",
    doc = "[derive-analysis]: zerocopy_derive::FromZeros#analysis"
)]
#[cfg_attr(
    not(feature = "derive"),
    doc = concat!("[derive]: https://docs.rs/zerocopy/", env!("CARGO_PKG_VERSION"), "/zerocopy/derive.FromZeros.html"),
    doc = concat!("[derive-analysis]: https://docs.rs/zerocopy/", env!("CARGO_PKG_VERSION"), "/zerocopy/derive.FromZeros.html#analysis"),
)]
#[cfg_attr(
    not(no_zerocopy_diagnostic_on_unimplemented_1_78_0),
    diagnostic::on_unimplemented(note = "Consider adding `#[derive(FromZeros)]` to `{Self}`")
)]
pub unsafe trait FromZeros: TryFromBytes {
    // The `Self: Sized` bound makes it so that `FromZeros` is still object
    // safe.
    #[doc(hidden)]
    fn only_derive_is_allowed_to_implement_this_trait()
    where
        Self: Sized;

    /// Overwrites `self` with zeros.
    ///
    /// Sets every byte in `self` to 0. While this is similar to doing `*self =
    /// Self::new_zeroed()`, it differs in that `zero` does not semantically
    /// drop the current value and replace it with a new one — it simply
    /// modifies the bytes of the existing value.
    ///
    /// # Examples
    ///
    /// ```
    /// # use zerocopy::FromZeros;
    /// # use zerocopy_derive::*;
    /// #
    /// #[derive(FromZeros)]
    /// #[repr(C)]
    /// struct PacketHeader {
    ///     src_port: [u8; 2],
    ///     dst_port: [u8; 2],
    ///     length: [u8; 2],
    ///     checksum: [u8; 2],
    /// }
    ///
    /// let mut header = PacketHeader {
    ///     src_port: 100u16.to_be_bytes(),
    ///     dst_port: 200u16.to_be_bytes(),
    ///     length: 300u16.to_be_bytes(),
    ///     checksum: 400u16.to_be_bytes(),
    /// };
    ///
    /// header.zero();
    ///
    /// assert_eq!(header.src_port, [0, 0]);
    /// assert_eq!(header.dst_port, [0, 0]);
    /// assert_eq!(header.length, [0, 0]);
    /// assert_eq!(header.checksum, [0, 0]);
    /// ```
    #[inline(always)]
    fn zero(&mut self) {
        let slf: *mut Self = self;
        let len = mem::size_of_val(self);
        // SAFETY:
        // - `self` is guaranteed by the type system to be valid for writes of
        //   size `size_of_val(self)`.
        // - `u8`'s alignment is 1, and thus `self` is guaranteed to be aligned
        //   as required by `u8`.
        // - Since `Self: FromZeros`, the all-zeros instance is a valid instance
        //   of `Self`.
        //
        // FIXME(#429): Add references to docs and quotes.
        unsafe { ptr::write_bytes(slf.cast::<u8>(), 0, len) };
    }

    /// Creates an instance of `Self` from zeroed bytes.
    ///
    /// # Examples
    ///
    /// ```
    /// # use zerocopy::FromZeros;
    /// # use zerocopy_derive::*;
    /// #
    /// #[derive(FromZeros)]
    /// #[repr(C)]
    /// struct PacketHeader {
    ///     src_port: [u8; 2],
    ///     dst_port: [u8; 2],
    ///     length: [u8; 2],
    ///     checksum: [u8; 2],
    /// }
    ///
    /// let header: PacketHeader = FromZeros::new_zeroed();
    ///
    /// assert_eq!(header.src_port, [0, 0]);
    /// assert_eq!(header.dst_port, [0, 0]);
    /// assert_eq!(header.length, [0, 0]);
    /// assert_eq!(header.checksum, [0, 0]);
    /// ```
    #[must_use = "has no side effects"]
    #[inline(always)]
    fn new_zeroed() -> Self
    where
        Self: Sized,
    {
        // SAFETY: `FromZeros` says that the all-zeros bit pattern is legal.
        unsafe { mem::zeroed() }
    }

    /// Creates a `Box<Self>` from zeroed bytes.
    ///
    /// This function is useful for allocating large values on the heap and
    /// zero-initializing them, without ever creating a temporary instance of
    /// `Self` on the stack. For example, `<[u8; 1048576]>::new_box_zeroed()`
    /// will allocate `[u8; 1048576]` directly on the heap; it does not require
    /// storing `[u8; 1048576]` in a temporary variable on the stack.
    ///
    /// On systems that use a heap implementation that supports allocating from
    /// pre-zeroed memory, using `new_box_zeroed` (or related functions) may
    /// have performance benefits.
    ///
    /// # Errors
    ///
    /// Returns an error on allocation failure. Allocation failure is guaranteed
    /// never to cause a panic or an abort.
    #[must_use = "has no side effects (other than allocation)"]
    #[cfg(any(feature = "alloc", test))]
    #[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))]
    #[inline]
    fn new_box_zeroed() -> Result<Box<Self>, AllocError>
    where
        Self: Sized,
    {
        // If `Self` is a ZST, then return a proper boxed instance of it. There
        // is no allocation, but `Box` does require a correct dangling pointer.
        let layout = Layout::new::<Self>();
        if layout.size() == 0 {
            // Construct the `Box` from a dangling pointer to avoid calling
            // `Self::new_zeroed`. This ensures that stack space is never
            // allocated for `Self` even on lower opt-levels where this branch
            // might not get optimized out.

            // SAFETY: Per [1], when `T` is a ZST, `Box<T>`'s only validity
            // requirements are that the pointer is non-null and sufficiently
            // aligned. Per [2], `NonNull::dangling` produces a pointer which
            // is sufficiently aligned. Since the produced pointer is a
            // `NonNull`, it is non-null.
            //
            // [1] Per https://doc.rust-lang.org/1.81.0/std/boxed/index.html#memory-layout:
            //
            //   For zero-sized values, the `Box` pointer has to be non-null and sufficiently aligned.
            //
            // [2] Per https://doc.rust-lang.org/std/ptr/struct.NonNull.html#method.dangling:
            //
            //   Creates a new `NonNull` that is dangling, but well-aligned.
            return Ok(unsafe { Box::from_raw(NonNull::dangling().as_ptr()) });
        }

        // FIXME(#429): Add a "SAFETY" comment and remove this `allow`.
        #[allow(clippy::undocumented_unsafe_blocks)]
        let ptr = unsafe { alloc::alloc::alloc_zeroed(layout).cast::<Self>() };
        if ptr.is_null() {
            return Err(AllocError);
        }
        // FIXME(#429): Add a "SAFETY" comment and remove this `allow`.
        #[allow(clippy::undocumented_unsafe_blocks)]
        Ok(unsafe { Box::from_raw(ptr) })
    }

    /// Creates a `Box<[Self]>` (a boxed slice) from zeroed bytes.
    ///
    /// This function is useful for allocating large values of `[Self]` on the
    /// heap and zero-initializing them, without ever creating a temporary
    /// instance of `[Self; _]` on the stack. For example,
    /// `u8::new_box_slice_zeroed(1048576)` will allocate the slice directly on
    /// the heap; it does not require storing the slice on the stack.
    ///
    /// On systems that use a heap implementation that supports allocating from
    /// pre-zeroed memory, using `new_box_slice_zeroed` may have performance
    /// benefits.
    ///
    /// If `Self` is a zero-sized type, then this function will return a
    /// `Box<[Self]>` that has the correct `len`. Such a box cannot contain any
    /// actual information, but its `len()` property will report the correct
    /// value.
    ///
    /// # Errors
    ///
    /// Returns an error on allocation failure. Allocation failure is
    /// guaranteed never to cause a panic or an abort.
    #[must_use = "has no side effects (other than allocation)"]
    #[cfg(feature = "alloc")]
    #[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))]
    #[inline]
    fn new_box_zeroed_with_elems(count: usize) -> Result<Box<Self>, AllocError>
    where
        Self: KnownLayout<PointerMetadata = usize>,
    {
        // SAFETY: `alloc::alloc::alloc_zeroed` is a valid argument of
        // `new_box`. The referent of the pointer returned by `alloc_zeroed`
        // (and, consequently, the `Box` derived from it) is a valid instance of
        // `Self`, because `Self` is `FromZeros`.
        unsafe { crate::util::new_box(count, alloc::alloc::alloc_zeroed) }
    }

    #[deprecated(since = "0.8.0", note = "renamed to `FromZeros::new_box_zeroed_with_elems`")]
    #[doc(hidden)]
    #[cfg(feature = "alloc")]
    #[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))]
    #[must_use = "has no side effects (other than allocation)"]
    #[inline(always)]
    fn new_box_slice_zeroed(len: usize) -> Result<Box<[Self]>, AllocError>
    where
        Self: Sized,
    {
        <[Self]>::new_box_zeroed_with_elems(len)
    }

    /// Creates a `Vec<Self>` from zeroed bytes.
    ///
    /// This function is useful for allocating large values of `Vec`s and
    /// zero-initializing them, without ever creating a temporary instance of
    /// `[Self; _]` (or many temporary instances of `Self`) on the stack. For
    /// example, `u8::new_vec_zeroed(1048576)` will allocate directly on the
    /// heap; it does not require storing intermediate values on the stack.
    ///
    /// On systems that use a heap implementation that supports allocating from
    /// pre-zeroed memory, using `new_vec_zeroed` may have performance benefits.
    ///
    /// If `Self` is a zero-sized type, then this function will return a
    /// `Vec<Self>` that has the correct `len`. Such a `Vec` cannot contain any
    /// actual information, but its `len()` property will report the correct
    /// value.
    ///
    /// # Errors
    ///
    /// Returns an error on allocation failure. Allocation failure is
    /// guaranteed never to cause a panic or an abort.
    #[must_use = "has no side effects (other than allocation)"]
    #[cfg(feature = "alloc")]
    #[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))]
    #[inline(always)]
    fn new_vec_zeroed(len: usize) -> Result<Vec<Self>, AllocError>
    where
        Self: Sized,
    {
        <[Self]>::new_box_zeroed_with_elems(len).map(Into::into)
    }

    /// Extends a `Vec<Self>` by pushing `additional` new items onto the end of
    /// the vector. The new items are initialized with zeros.
    #[cfg(not(no_zerocopy_panic_in_const_and_vec_try_reserve_1_57_0))]
    #[cfg(feature = "alloc")]
    #[cfg_attr(doc_cfg, doc(cfg(all(rust = "1.57.0", feature = "alloc"))))]
    #[inline(always)]
    fn extend_vec_zeroed(v: &mut Vec<Self>, additional: usize) -> Result<(), AllocError>
    where
        Self: Sized,
    {
        // PANICS: We pass `v.len()` for `position`, so the `position > v.len()`
        // panic condition is not satisfied.
        <Self as FromZeros>::insert_vec_zeroed(v, v.len(), additional)
    }

    /// Inserts `additional` new items into `Vec<Self>` at `position`. The new
    /// items are initialized with zeros.
    ///
    /// # Panics
    ///
    /// Panics if `position > v.len()`.
    #[cfg(not(no_zerocopy_panic_in_const_and_vec_try_reserve_1_57_0))]
    #[cfg(feature = "alloc")]
    #[cfg_attr(doc_cfg, doc(cfg(all(rust = "1.57.0", feature = "alloc"))))]
    #[inline]
    fn insert_vec_zeroed(
        v: &mut Vec<Self>,
        position: usize,
        additional: usize,
    ) -> Result<(), AllocError>
    where
        Self: Sized,
    {
        assert!(position <= v.len());
        // We only conditionally compile on versions on which `try_reserve` is
        // stable; the Clippy lint is a false positive.
        v.try_reserve(additional).map_err(|_| AllocError)?;
        // SAFETY: The `try_reserve` call guarantees that these cannot overflow:
        // * `ptr.add(position)`
        // * `position + additional`
        // * `v.len() + additional`
        //
        // `v.len() - position` cannot overflow because we asserted that
        // `position <= v.len()`.
        #[allow(clippy::multiple_unsafe_ops_per_block)]
        unsafe {
            // This is a potentially overlapping copy.
            let ptr = v.as_mut_ptr();
            #[allow(clippy::arithmetic_side_effects)]
            ptr.add(position).copy_to(ptr.add(position + additional), v.len() - position);
            ptr.add(position).write_bytes(0, additional);
            #[allow(clippy::arithmetic_side_effects)]
            v.set_len(v.len() + additional);
        }

        Ok(())
    }
}
3520
3521/// Analyzes whether a type is [`FromBytes`].
3522///
3523/// This derive analyzes, at compile time, whether the annotated type satisfies
3524/// the [safety conditions] of `FromBytes` and implements `FromBytes` and its
3525/// supertraits if it is sound to do so. This derive can be applied to structs,
3526/// enums, and unions;
3527/// e.g.:
3528///
3529/// ```
3530/// # use zerocopy_derive::{FromBytes, FromZeros, Immutable};
3531/// #[derive(FromBytes)]
3532/// struct MyStruct {
3533/// # /*
3534///     ...
3535/// # */
3536/// }
3537///
3538/// #[derive(FromBytes)]
3539/// #[repr(u8)]
3540/// enum MyEnum {
3541/// #   V00, V01, V02, V03, V04, V05, V06, V07, V08, V09, V0A, V0B, V0C, V0D, V0E,
3542/// #   V0F, V10, V11, V12, V13, V14, V15, V16, V17, V18, V19, V1A, V1B, V1C, V1D,
3543/// #   V1E, V1F, V20, V21, V22, V23, V24, V25, V26, V27, V28, V29, V2A, V2B, V2C,
3544/// #   V2D, V2E, V2F, V30, V31, V32, V33, V34, V35, V36, V37, V38, V39, V3A, V3B,
3545/// #   V3C, V3D, V3E, V3F, V40, V41, V42, V43, V44, V45, V46, V47, V48, V49, V4A,
3546/// #   V4B, V4C, V4D, V4E, V4F, V50, V51, V52, V53, V54, V55, V56, V57, V58, V59,
3547/// #   V5A, V5B, V5C, V5D, V5E, V5F, V60, V61, V62, V63, V64, V65, V66, V67, V68,
3548/// #   V69, V6A, V6B, V6C, V6D, V6E, V6F, V70, V71, V72, V73, V74, V75, V76, V77,
3549/// #   V78, V79, V7A, V7B, V7C, V7D, V7E, V7F, V80, V81, V82, V83, V84, V85, V86,
3550/// #   V87, V88, V89, V8A, V8B, V8C, V8D, V8E, V8F, V90, V91, V92, V93, V94, V95,
3551/// #   V96, V97, V98, V99, V9A, V9B, V9C, V9D, V9E, V9F, VA0, VA1, VA2, VA3, VA4,
3552/// #   VA5, VA6, VA7, VA8, VA9, VAA, VAB, VAC, VAD, VAE, VAF, VB0, VB1, VB2, VB3,
3553/// #   VB4, VB5, VB6, VB7, VB8, VB9, VBA, VBB, VBC, VBD, VBE, VBF, VC0, VC1, VC2,
3554/// #   VC3, VC4, VC5, VC6, VC7, VC8, VC9, VCA, VCB, VCC, VCD, VCE, VCF, VD0, VD1,
3555/// #   VD2, VD3, VD4, VD5, VD6, VD7, VD8, VD9, VDA, VDB, VDC, VDD, VDE, VDF, VE0,
3556/// #   VE1, VE2, VE3, VE4, VE5, VE6, VE7, VE8, VE9, VEA, VEB, VEC, VED, VEE, VEF,
3557/// #   VF0, VF1, VF2, VF3, VF4, VF5, VF6, VF7, VF8, VF9, VFA, VFB, VFC, VFD, VFE,
3558/// #   VFF,
3559/// # /*
3560///     ...
3561/// # */
3562/// }
3563///
3564/// #[derive(FromBytes, Immutable)]
3565/// union MyUnion {
3566/// #   variant: u8,
3567/// # /*
3568///     ...
3569/// # */
3570/// }
3571/// ```
3572///
3573/// [safety conditions]: trait@FromBytes#safety
3574///
3575/// # Analysis
3576///
3577/// *This section describes, roughly, the analysis performed by this derive to
3578/// determine whether it is sound to implement `FromBytes` for a given type.
3579/// Unless you are modifying the implementation of this derive, or attempting to
3580/// manually implement `FromBytes` for a type yourself, you don't need to read
3581/// this section.*
3582///
3583/// If a type has the following properties, then this derive can implement
3584/// `FromBytes` for that type:
3585///
3586/// - If the type is a struct, all of its fields must be `FromBytes`.
3587/// - If the type is an enum:
3588///   - It must have a defined representation which is one of `u8`, `u16`, `i8`,
3589///     or `i16`.
3590///   - The maximum number of discriminants must be used (so that every possible
3591///     bit pattern is a valid one).
3592///   - Its fields must be `FromBytes`.
3593///
3594/// This analysis is subject to change. Unsafe code may *only* rely on the
3595/// documented [safety conditions] of `FromBytes`, and must *not* rely on the
3596/// implementation details of this derive.
3597///
3598/// ## Why isn't an explicit representation required for structs?
3599///
3600/// Neither this derive, nor the [safety conditions] of `FromBytes`, requires
3601/// that structs are marked with `#[repr(C)]`.
3602///
/// Per the [Rust reference][reference],
3604///
3605/// > The representation of a type can change the padding between fields, but
3606/// > does not change the layout of the fields themselves.
3607///
3608/// [reference]: https://doc.rust-lang.org/reference/type-layout.html#representations
3609///
3610/// Since the layout of structs only consists of padding bytes and field bytes,
3611/// a struct is soundly `FromBytes` if:
3612/// 1. its padding is soundly `FromBytes`, and
3613/// 2. its fields are soundly `FromBytes`.
3614///
3615/// The answer to the first question is always yes: padding bytes do not have
3616/// any validity constraints. A [discussion] of this question in the Unsafe Code
3617/// Guidelines Working Group concluded that it would be virtually unimaginable
3618/// for future versions of rustc to add validity constraints to padding bytes.
3619///
3620/// [discussion]: https://github.com/rust-lang/unsafe-code-guidelines/issues/174
3621///
3622/// Whether a struct is soundly `FromBytes` therefore solely depends on whether
3623/// its fields are `FromBytes`.
3624#[cfg(any(feature = "derive", test))]
3625#[cfg_attr(doc_cfg, doc(cfg(feature = "derive")))]
3626pub use zerocopy_derive::FromBytes;
3627
3628/// Types for which any bit pattern is valid.
3629///
3630/// Any memory region of the appropriate length which contains initialized bytes
3631/// can be viewed as any `FromBytes` type with no runtime overhead. This is
3632/// useful for efficiently parsing bytes as structured data.
3633///
3634/// # Warning: Padding bytes
3635///
3636/// Note that, when a value is moved or copied, only the non-padding bytes of
3637/// that value are guaranteed to be preserved. It is unsound to assume that
3638/// values written to padding bytes are preserved after a move or copy. For
3639/// example, the following is unsound:
3640///
3641/// ```rust,no_run
3642/// use core::mem::{size_of, transmute};
3643/// use zerocopy::FromZeros;
3644/// # use zerocopy_derive::*;
3645///
3646/// // Assume `Foo` is a type with padding bytes.
3647/// #[derive(FromZeros, Default)]
3648/// struct Foo {
3649/// # /*
3650///     ...
3651/// # */
3652/// }
3653///
3654/// let mut foo: Foo = Foo::default();
3655/// FromZeros::zero(&mut foo);
3656/// // UNSOUND: Although `FromZeros::zero` writes zeros to all bytes of `foo`,
3657/// // those writes are not guaranteed to be preserved in padding bytes when
3658/// // `foo` is moved, so this may expose padding bytes as `u8`s.
3659/// let foo_bytes: [u8; size_of::<Foo>()] = unsafe { transmute(foo) };
3660/// ```
3661///
3662/// # Implementation
3663///
3664/// **Do not implement this trait yourself!** Instead, use
3665/// [`#[derive(FromBytes)]`][derive]; e.g.:
3666///
3667/// ```
3668/// # use zerocopy_derive::{FromBytes, Immutable};
3669/// #[derive(FromBytes)]
3670/// struct MyStruct {
3671/// # /*
3672///     ...
3673/// # */
3674/// }
3675///
3676/// #[derive(FromBytes)]
3677/// #[repr(u8)]
3678/// enum MyEnum {
3679/// #   V00, V01, V02, V03, V04, V05, V06, V07, V08, V09, V0A, V0B, V0C, V0D, V0E,
3680/// #   V0F, V10, V11, V12, V13, V14, V15, V16, V17, V18, V19, V1A, V1B, V1C, V1D,
3681/// #   V1E, V1F, V20, V21, V22, V23, V24, V25, V26, V27, V28, V29, V2A, V2B, V2C,
3682/// #   V2D, V2E, V2F, V30, V31, V32, V33, V34, V35, V36, V37, V38, V39, V3A, V3B,
3683/// #   V3C, V3D, V3E, V3F, V40, V41, V42, V43, V44, V45, V46, V47, V48, V49, V4A,
3684/// #   V4B, V4C, V4D, V4E, V4F, V50, V51, V52, V53, V54, V55, V56, V57, V58, V59,
3685/// #   V5A, V5B, V5C, V5D, V5E, V5F, V60, V61, V62, V63, V64, V65, V66, V67, V68,
3686/// #   V69, V6A, V6B, V6C, V6D, V6E, V6F, V70, V71, V72, V73, V74, V75, V76, V77,
3687/// #   V78, V79, V7A, V7B, V7C, V7D, V7E, V7F, V80, V81, V82, V83, V84, V85, V86,
3688/// #   V87, V88, V89, V8A, V8B, V8C, V8D, V8E, V8F, V90, V91, V92, V93, V94, V95,
3689/// #   V96, V97, V98, V99, V9A, V9B, V9C, V9D, V9E, V9F, VA0, VA1, VA2, VA3, VA4,
3690/// #   VA5, VA6, VA7, VA8, VA9, VAA, VAB, VAC, VAD, VAE, VAF, VB0, VB1, VB2, VB3,
3691/// #   VB4, VB5, VB6, VB7, VB8, VB9, VBA, VBB, VBC, VBD, VBE, VBF, VC0, VC1, VC2,
3692/// #   VC3, VC4, VC5, VC6, VC7, VC8, VC9, VCA, VCB, VCC, VCD, VCE, VCF, VD0, VD1,
3693/// #   VD2, VD3, VD4, VD5, VD6, VD7, VD8, VD9, VDA, VDB, VDC, VDD, VDE, VDF, VE0,
3694/// #   VE1, VE2, VE3, VE4, VE5, VE6, VE7, VE8, VE9, VEA, VEB, VEC, VED, VEE, VEF,
3695/// #   VF0, VF1, VF2, VF3, VF4, VF5, VF6, VF7, VF8, VF9, VFA, VFB, VFC, VFD, VFE,
3696/// #   VFF,
3697/// # /*
3698///     ...
3699/// # */
3700/// }
3701///
3702/// #[derive(FromBytes, Immutable)]
3703/// union MyUnion {
3704/// #   variant: u8,
3705/// # /*
3706///     ...
3707/// # */
3708/// }
3709/// ```
3710///
3711/// This derive performs a sophisticated, compile-time safety analysis to
3712/// determine whether a type is `FromBytes`.
3713///
3714/// # Safety
3715///
3716/// *This section describes what is required in order for `T: FromBytes`, and
3717/// what unsafe code may assume of such types. If you don't plan on implementing
3718/// `FromBytes` manually, and you don't plan on writing unsafe code that
3719/// operates on `FromBytes` types, then you don't need to read this section.*
3720///
3721/// If `T: FromBytes`, then unsafe code may assume that it is sound to produce a
3722/// `T` whose bytes are initialized to any sequence of valid `u8`s (in other
3723/// words, any byte value which is not uninitialized). If a type is marked as
3724/// `FromBytes` which violates this contract, it may cause undefined behavior.
3725///
3726/// `#[derive(FromBytes)]` only permits [types which satisfy these
3727/// requirements][derive-analysis].
3728///
3729#[cfg_attr(
3730    feature = "derive",
3731    doc = "[derive]: zerocopy_derive::FromBytes",
3732    doc = "[derive-analysis]: zerocopy_derive::FromBytes#analysis"
3733)]
3734#[cfg_attr(
3735    not(feature = "derive"),
3736    doc = concat!("[derive]: https://docs.rs/zerocopy/", env!("CARGO_PKG_VERSION"), "/zerocopy/derive.FromBytes.html"),
3737    doc = concat!("[derive-analysis]: https://docs.rs/zerocopy/", env!("CARGO_PKG_VERSION"), "/zerocopy/derive.FromBytes.html#analysis"),
3738)]
3739#[cfg_attr(
3740    not(no_zerocopy_diagnostic_on_unimplemented_1_78_0),
3741    diagnostic::on_unimplemented(note = "Consider adding `#[derive(FromBytes)]` to `{Self}`")
3742)]
3743pub unsafe trait FromBytes: FromZeros {
3744    // The `Self: Sized` bound makes it so that `FromBytes` is still object
3745    // safe.
3746    #[doc(hidden)]
3747    fn only_derive_is_allowed_to_implement_this_trait()
3748    where
3749        Self: Sized;
3750
3751    /// Interprets the given `source` as a `&Self`.
3752    ///
3753    /// This method attempts to return a reference to `source` interpreted as a
3754    /// `Self`. If the length of `source` is not a [valid size of
3755    /// `Self`][valid-size], or if `source` is not appropriately aligned, this
3756    /// returns `Err`. If [`Self: Unaligned`][self-unaligned], you can
3757    /// [infallibly discard the alignment error][size-error-from].
3758    ///
3759    /// `Self` may be a sized type, a slice, or a [slice DST][slice-dst].
3760    ///
3761    /// [valid-size]: crate::KnownLayout#what-is-a-valid-size
3762    /// [self-unaligned]: Unaligned
3763    /// [size-error-from]: error/struct.SizeError.html#method.from-1
3764    /// [slice-dst]: KnownLayout#dynamically-sized-types
3765    ///
3766    /// # Compile-Time Assertions
3767    ///
3768    /// This method cannot yet be used on unsized types whose dynamically-sized
3769    /// component is zero-sized. Attempting to use this method on such types
3770    /// results in a compile-time assertion error; e.g.:
3771    ///
3772    /// ```compile_fail,E0080
3773    /// use zerocopy::*;
3774    /// # use zerocopy_derive::*;
3775    ///
3776    /// #[derive(FromBytes, Immutable, KnownLayout)]
3777    /// #[repr(C)]
3778    /// struct ZSTy {
3779    ///     leading_sized: u16,
3780    ///     trailing_dst: [()],
3781    /// }
3782    ///
3783    /// let _ = ZSTy::ref_from_bytes(0u16.as_bytes()); // âš  Compile Error!
3784    /// ```
3785    ///
3786    /// # Examples
3787    ///
3788    /// ```
3789    /// use zerocopy::FromBytes;
3790    /// # use zerocopy_derive::*;
3791    ///
3792    /// #[derive(FromBytes, KnownLayout, Immutable)]
3793    /// #[repr(C)]
3794    /// struct PacketHeader {
3795    ///     src_port: [u8; 2],
3796    ///     dst_port: [u8; 2],
3797    ///     length: [u8; 2],
3798    ///     checksum: [u8; 2],
3799    /// }
3800    ///
3801    /// #[derive(FromBytes, KnownLayout, Immutable)]
3802    /// #[repr(C)]
3803    /// struct Packet {
3804    ///     header: PacketHeader,
3805    ///     body: [u8],
3806    /// }
3807    ///
3808    /// // These bytes encode a `Packet`.
3809    /// let bytes = &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11][..];
3810    ///
3811    /// let packet = Packet::ref_from_bytes(bytes).unwrap();
3812    ///
3813    /// assert_eq!(packet.header.src_port, [0, 1]);
3814    /// assert_eq!(packet.header.dst_port, [2, 3]);
3815    /// assert_eq!(packet.header.length, [4, 5]);
3816    /// assert_eq!(packet.header.checksum, [6, 7]);
3817    /// assert_eq!(packet.body, [8, 9, 10, 11]);
3818    /// ```
3819    #[must_use = "has no side effects"]
3820    #[inline]
3821    fn ref_from_bytes(source: &[u8]) -> Result<&Self, CastError<&[u8], Self>>
3822    where
3823        Self: KnownLayout + Immutable,
3824    {
3825        static_assert_dst_is_not_zst!(Self);
3826        match Ptr::from_ref(source).try_cast_into_no_leftover::<_, BecauseImmutable>(None) {
3827            Ok(ptr) => Ok(ptr.recall_validity().as_ref()),
3828            Err(err) => Err(err.map_src(|src| src.as_ref())),
3829        }
3830    }
3831
    /// Interprets the prefix of the given `source` as a `&Self` without
    /// copying.
    ///
    /// This method computes the [largest possible size of `Self`][valid-size]
    /// that can fit in the leading bytes of `source`, then attempts to return
    /// both a reference to those bytes interpreted as a `Self`, and a reference
    /// to the remaining bytes. If there are insufficient bytes, or if `source`
    /// is not appropriately aligned, this returns `Err`. If [`Self:
    /// Unaligned`][self-unaligned], you can [infallibly discard the alignment
    /// error][size-error-from].
    ///
    /// `Self` may be a sized type, a slice, or a [slice DST][slice-dst].
    ///
    /// [valid-size]: crate::KnownLayout#what-is-a-valid-size
    /// [self-unaligned]: Unaligned
    /// [size-error-from]: error/struct.SizeError.html#method.from-1
    /// [slice-dst]: KnownLayout#dynamically-sized-types
    ///
    /// # Compile-Time Assertions
    ///
    /// This method cannot yet be used on unsized types whose dynamically-sized
    /// component is zero-sized. See [`ref_from_prefix_with_elems`], which does
    /// support such types. Attempting to use this method on such types results
    /// in a compile-time assertion error; e.g.:
    ///
    /// ```compile_fail,E0080
    /// use zerocopy::*;
    /// # use zerocopy_derive::*;
    ///
    /// #[derive(FromBytes, Immutable, KnownLayout)]
    /// #[repr(C)]
    /// struct ZSTy {
    ///     leading_sized: u16,
    ///     trailing_dst: [()],
    /// }
    ///
    /// let _ = ZSTy::ref_from_prefix(0u16.as_bytes()); // ⚠ Compile Error!
    /// ```
    ///
    /// [`ref_from_prefix_with_elems`]: FromBytes::ref_from_prefix_with_elems
    ///
    /// # Examples
    ///
    /// ```
    /// use zerocopy::FromBytes;
    /// # use zerocopy_derive::*;
    ///
    /// #[derive(FromBytes, KnownLayout, Immutable)]
    /// #[repr(C)]
    /// struct PacketHeader {
    ///     src_port: [u8; 2],
    ///     dst_port: [u8; 2],
    ///     length: [u8; 2],
    ///     checksum: [u8; 2],
    /// }
    ///
    /// #[derive(FromBytes, KnownLayout, Immutable)]
    /// #[repr(C)]
    /// struct Packet {
    ///     header: PacketHeader,
    ///     body: [[u8; 2]],
    /// }
    ///
    /// // These are more bytes than are needed to encode a `Packet`.
    /// let bytes = &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14][..];
    ///
    /// let (packet, suffix) = Packet::ref_from_prefix(bytes).unwrap();
    ///
    /// assert_eq!(packet.header.src_port, [0, 1]);
    /// assert_eq!(packet.header.dst_port, [2, 3]);
    /// assert_eq!(packet.header.length, [4, 5]);
    /// assert_eq!(packet.header.checksum, [6, 7]);
    /// assert_eq!(packet.body, [[8, 9], [10, 11], [12, 13]]);
    /// assert_eq!(suffix, &[14u8][..]);
    /// ```
    #[must_use = "has no side effects"]
    #[inline]
    fn ref_from_prefix(source: &[u8]) -> Result<(&Self, &[u8]), CastError<&[u8], Self>>
    where
        Self: KnownLayout + Immutable,
    {
        static_assert_dst_is_not_zst!(Self);
        // `None` element count: the helper computes the largest prefix of
        // `source` that is a valid size of `Self` (see method docs).
        ref_from_prefix_suffix(source, None, CastType::Prefix)
    }
3916
3917    /// Interprets the suffix of the given bytes as a `&Self`.
3918    ///
3919    /// This method computes the [largest possible size of `Self`][valid-size]
3920    /// that can fit in the trailing bytes of `source`, then attempts to return
3921    /// both a reference to those bytes interpreted as a `Self`, and a reference
3922    /// to the preceding bytes. If there are insufficient bytes, or if that
3923    /// suffix of `source` is not appropriately aligned, this returns `Err`. If
3924    /// [`Self: Unaligned`][self-unaligned], you can [infallibly discard the
3925    /// alignment error][size-error-from].
3926    ///
3927    /// `Self` may be a sized type, a slice, or a [slice DST][slice-dst].
3928    ///
3929    /// [valid-size]: crate::KnownLayout#what-is-a-valid-size
3930    /// [self-unaligned]: Unaligned
3931    /// [size-error-from]: error/struct.SizeError.html#method.from-1
3932    /// [slice-dst]: KnownLayout#dynamically-sized-types
3933    ///
3934    /// # Compile-Time Assertions
3935    ///
3936    /// This method cannot yet be used on unsized types whose dynamically-sized
3937    /// component is zero-sized. See [`ref_from_suffix_with_elems`], which does
3938    /// support such types. Attempting to use this method on such types results
3939    /// in a compile-time assertion error; e.g.:
3940    ///
3941    /// ```compile_fail,E0080
3942    /// use zerocopy::*;
3943    /// # use zerocopy_derive::*;
3944    ///
3945    /// #[derive(FromBytes, Immutable, KnownLayout)]
3946    /// #[repr(C)]
3947    /// struct ZSTy {
3948    ///     leading_sized: u16,
3949    ///     trailing_dst: [()],
3950    /// }
3951    ///
3952    /// let _ = ZSTy::ref_from_suffix(0u16.as_bytes()); // âš  Compile Error!
3953    /// ```
3954    ///
3955    /// [`ref_from_suffix_with_elems`]: FromBytes::ref_from_suffix_with_elems
3956    ///
3957    /// # Examples
3958    ///
3959    /// ```
3960    /// use zerocopy::FromBytes;
3961    /// # use zerocopy_derive::*;
3962    ///
3963    /// #[derive(FromBytes, Immutable, KnownLayout)]
3964    /// #[repr(C)]
3965    /// struct PacketTrailer {
3966    ///     frame_check_sequence: [u8; 4],
3967    /// }
3968    ///
3969    /// // These are more bytes than are needed to encode a `PacketTrailer`.
3970    /// let bytes = &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9][..];
3971    ///
3972    /// let (prefix, trailer) = PacketTrailer::ref_from_suffix(bytes).unwrap();
3973    ///
3974    /// assert_eq!(prefix, &[0, 1, 2, 3, 4, 5][..]);
3975    /// assert_eq!(trailer.frame_check_sequence, [6, 7, 8, 9]);
3976    /// ```
3977    #[must_use = "has no side effects"]
3978    #[inline]
3979    fn ref_from_suffix(source: &[u8]) -> Result<(&[u8], &Self), CastError<&[u8], Self>>
3980    where
3981        Self: Immutable + KnownLayout,
3982    {
3983        static_assert_dst_is_not_zst!(Self);
3984        ref_from_prefix_suffix(source, None, CastType::Suffix).map(swap)
3985    }
3986
    /// Interprets the given `source` as a `&mut Self`.
    ///
    /// This method attempts to return a reference to `source` interpreted as a
    /// `Self`. If the length of `source` is not a [valid size of
    /// `Self`][valid-size], or if `source` is not appropriately aligned, this
    /// returns `Err`. If [`Self: Unaligned`][self-unaligned], you can
    /// [infallibly discard the alignment error][size-error-from].
    ///
    /// `Self` may be a sized type, a slice, or a [slice DST][slice-dst].
    ///
    /// [valid-size]: crate::KnownLayout#what-is-a-valid-size
    /// [self-unaligned]: Unaligned
    /// [size-error-from]: error/struct.SizeError.html#method.from-1
    /// [slice-dst]: KnownLayout#dynamically-sized-types
    ///
    /// # Compile-Time Assertions
    ///
    /// This method cannot yet be used on unsized types whose dynamically-sized
    /// component is zero-sized. See [`mut_from_bytes_with_elems`], which does
    /// support such types. Attempting to use this method on such types results
    /// in a compile-time assertion error; e.g.:
    ///
    /// ```compile_fail,E0080
    /// use zerocopy::*;
    /// # use zerocopy_derive::*;
    ///
    /// #[derive(FromBytes, Immutable, IntoBytes, KnownLayout)]
    /// #[repr(C, packed)]
    /// struct ZSTy {
    ///     leading_sized: [u8; 2],
    ///     trailing_dst: [()],
    /// }
    ///
    /// let mut source = [85, 85];
    /// let _ = ZSTy::mut_from_bytes(&mut source[..]); // ⚠ Compile Error!
    /// ```
    ///
    /// [`mut_from_bytes_with_elems`]: FromBytes::mut_from_bytes_with_elems
    ///
    /// # Examples
    ///
    /// ```
    /// use zerocopy::FromBytes;
    /// # use zerocopy_derive::*;
    ///
    /// #[derive(FromBytes, IntoBytes, KnownLayout, Immutable)]
    /// #[repr(C)]
    /// struct PacketHeader {
    ///     src_port: [u8; 2],
    ///     dst_port: [u8; 2],
    ///     length: [u8; 2],
    ///     checksum: [u8; 2],
    /// }
    ///
    /// // These bytes encode a `PacketHeader`.
    /// let bytes = &mut [0, 1, 2, 3, 4, 5, 6, 7][..];
    ///
    /// let header = PacketHeader::mut_from_bytes(bytes).unwrap();
    ///
    /// assert_eq!(header.src_port, [0, 1]);
    /// assert_eq!(header.dst_port, [2, 3]);
    /// assert_eq!(header.length, [4, 5]);
    /// assert_eq!(header.checksum, [6, 7]);
    ///
    /// header.checksum = [0, 0];
    ///
    /// assert_eq!(bytes, [0, 1, 2, 3, 4, 5, 0, 0]);
    /// ```
    #[must_use = "has no side effects"]
    #[inline]
    fn mut_from_bytes(source: &mut [u8]) -> Result<&mut Self, CastError<&mut [u8], Self>>
    where
        Self: IntoBytes + KnownLayout,
    {
        static_assert_dst_is_not_zst!(Self);
        // Cast the exclusive byte pointer to `Self`, requiring that the cast
        // consume every byte (`no_leftover`); any size mismatch is an error.
        match Ptr::from_mut(source).try_cast_into_no_leftover::<_, BecauseExclusive>(None) {
            Ok(ptr) => Ok(ptr.recall_validity::<_, (_, (_, _))>().as_mut()),
            Err(err) => Err(err.map_src(|src| src.as_mut())),
        }
    }
4067
    /// Interprets the prefix of the given `source` as a `&mut Self` without
    /// copying.
    ///
    /// This method computes the [largest possible size of `Self`][valid-size]
    /// that can fit in the leading bytes of `source`, then attempts to return
    /// both a reference to those bytes interpreted as a `Self`, and a reference
    /// to the remaining bytes. If there are insufficient bytes, or if `source`
    /// is not appropriately aligned, this returns `Err`. If [`Self:
    /// Unaligned`][self-unaligned], you can [infallibly discard the alignment
    /// error][size-error-from].
    ///
    /// `Self` may be a sized type, a slice, or a [slice DST][slice-dst].
    ///
    /// [valid-size]: crate::KnownLayout#what-is-a-valid-size
    /// [self-unaligned]: Unaligned
    /// [size-error-from]: error/struct.SizeError.html#method.from-1
    /// [slice-dst]: KnownLayout#dynamically-sized-types
    ///
    /// # Compile-Time Assertions
    ///
    /// This method cannot yet be used on unsized types whose dynamically-sized
    /// component is zero-sized. See [`mut_from_prefix_with_elems`], which does
    /// support such types. Attempting to use this method on such types results
    /// in a compile-time assertion error; e.g.:
    ///
    /// ```compile_fail,E0080
    /// use zerocopy::*;
    /// # use zerocopy_derive::*;
    ///
    /// #[derive(FromBytes, Immutable, IntoBytes, KnownLayout)]
    /// #[repr(C, packed)]
    /// struct ZSTy {
    ///     leading_sized: [u8; 2],
    ///     trailing_dst: [()],
    /// }
    ///
    /// let mut source = [85, 85];
    /// let _ = ZSTy::mut_from_prefix(&mut source[..]); // ⚠ Compile Error!
    /// ```
    ///
    /// [`mut_from_prefix_with_elems`]: FromBytes::mut_from_prefix_with_elems
    ///
    /// # Examples
    ///
    /// ```
    /// use zerocopy::FromBytes;
    /// # use zerocopy_derive::*;
    ///
    /// #[derive(FromBytes, IntoBytes, KnownLayout, Immutable)]
    /// #[repr(C)]
    /// struct PacketHeader {
    ///     src_port: [u8; 2],
    ///     dst_port: [u8; 2],
    ///     length: [u8; 2],
    ///     checksum: [u8; 2],
    /// }
    ///
    /// // These are more bytes than are needed to encode a `PacketHeader`.
    /// let bytes = &mut [0, 1, 2, 3, 4, 5, 6, 7, 8, 9][..];
    ///
    /// let (header, body) = PacketHeader::mut_from_prefix(bytes).unwrap();
    ///
    /// assert_eq!(header.src_port, [0, 1]);
    /// assert_eq!(header.dst_port, [2, 3]);
    /// assert_eq!(header.length, [4, 5]);
    /// assert_eq!(header.checksum, [6, 7]);
    /// assert_eq!(body, &[8, 9][..]);
    ///
    /// header.checksum = [0, 0];
    /// body.fill(1);
    ///
    /// assert_eq!(bytes, [0, 1, 2, 3, 4, 5, 0, 0, 1, 1]);
    /// ```
    #[must_use = "has no side effects"]
    #[inline]
    fn mut_from_prefix(
        source: &mut [u8],
    ) -> Result<(&mut Self, &mut [u8]), CastError<&mut [u8], Self>>
    where
        Self: IntoBytes + KnownLayout,
    {
        static_assert_dst_is_not_zst!(Self);
        // `None` element count: the helper computes the largest prefix of
        // `source` that is a valid size of `Self` (see method docs).
        mut_from_prefix_suffix(source, None, CastType::Prefix)
    }
4152
4153    /// Interprets the suffix of the given `source` as a `&mut Self` without
4154    /// copying.
4155    ///
4156    /// This method computes the [largest possible size of `Self`][valid-size]
4157    /// that can fit in the trailing bytes of `source`, then attempts to return
4158    /// both a reference to those bytes interpreted as a `Self`, and a reference
4159    /// to the preceding bytes. If there are insufficient bytes, or if that
4160    /// suffix of `source` is not appropriately aligned, this returns `Err`. If
4161    /// [`Self: Unaligned`][self-unaligned], you can [infallibly discard the
4162    /// alignment error][size-error-from].
4163    ///
4164    /// `Self` may be a sized type, a slice, or a [slice DST][slice-dst].
4165    ///
4166    /// [valid-size]: crate::KnownLayout#what-is-a-valid-size
4167    /// [self-unaligned]: Unaligned
4168    /// [size-error-from]: error/struct.SizeError.html#method.from-1
4169    /// [slice-dst]: KnownLayout#dynamically-sized-types
4170    ///
4171    /// # Compile-Time Assertions
4172    ///
4173    /// This method cannot yet be used on unsized types whose dynamically-sized
4174    /// component is zero-sized. Attempting to use this method on such types
4175    /// results in a compile-time assertion error; e.g.:
4176    ///
4177    /// ```compile_fail,E0080
4178    /// use zerocopy::*;
4179    /// # use zerocopy_derive::*;
4180    ///
4181    /// #[derive(FromBytes, Immutable, IntoBytes, KnownLayout)]
4182    /// #[repr(C, packed)]
4183    /// struct ZSTy {
4184    ///     leading_sized: [u8; 2],
4185    ///     trailing_dst: [()],
4186    /// }
4187    ///
4188    /// let mut source = [85, 85];
4189    /// let _ = ZSTy::mut_from_suffix(&mut source[..]); // âš  Compile Error!
4190    /// ```
4191    ///
4192    /// # Examples
4193    ///
4194    /// ```
4195    /// use zerocopy::FromBytes;
4196    /// # use zerocopy_derive::*;
4197    ///
4198    /// #[derive(FromBytes, IntoBytes, KnownLayout, Immutable)]
4199    /// #[repr(C)]
4200    /// struct PacketTrailer {
4201    ///     frame_check_sequence: [u8; 4],
4202    /// }
4203    ///
4204    /// // These are more bytes than are needed to encode a `PacketTrailer`.
4205    /// let bytes = &mut [0, 1, 2, 3, 4, 5, 6, 7, 8, 9][..];
4206    ///
4207    /// let (prefix, trailer) = PacketTrailer::mut_from_suffix(bytes).unwrap();
4208    ///
4209    /// assert_eq!(prefix, &[0u8, 1, 2, 3, 4, 5][..]);
4210    /// assert_eq!(trailer.frame_check_sequence, [6, 7, 8, 9]);
4211    ///
4212    /// prefix.fill(0);
4213    /// trailer.frame_check_sequence.fill(1);
4214    ///
4215    /// assert_eq!(bytes, [0, 0, 0, 0, 0, 0, 1, 1, 1, 1]);
4216    /// ```
4217    #[must_use = "has no side effects"]
4218    #[inline]
4219    fn mut_from_suffix(
4220        source: &mut [u8],
4221    ) -> Result<(&mut [u8], &mut Self), CastError<&mut [u8], Self>>
4222    where
4223        Self: IntoBytes + KnownLayout,
4224    {
4225        static_assert_dst_is_not_zst!(Self);
4226        mut_from_prefix_suffix(source, None, CastType::Suffix).map(swap)
4227    }
4228
    /// Interprets the given `source` as a `&Self` with a DST length equal to
    /// `count`.
    ///
    /// This method attempts to return a reference to `source` interpreted as a
    /// `Self` with `count` trailing elements. If the length of `source` is not
    /// equal to the size of `Self` with `count` elements, or if `source` is not
    /// appropriately aligned, this returns `Err`. If [`Self:
    /// Unaligned`][self-unaligned], you can [infallibly discard the alignment
    /// error][size-error-from].
    ///
    /// [self-unaligned]: Unaligned
    /// [size-error-from]: error/struct.SizeError.html#method.from-1
    ///
    /// # Examples
    ///
    /// ```
    /// use zerocopy::FromBytes;
    /// # use zerocopy_derive::*;
    ///
    /// # #[derive(Debug, PartialEq, Eq)]
    /// #[derive(FromBytes, Immutable)]
    /// #[repr(C)]
    /// struct Pixel {
    ///     r: u8,
    ///     g: u8,
    ///     b: u8,
    ///     a: u8,
    /// }
    ///
    /// let bytes = &[0, 1, 2, 3, 4, 5, 6, 7][..];
    ///
    /// let pixels = <[Pixel]>::ref_from_bytes_with_elems(bytes, 2).unwrap();
    ///
    /// assert_eq!(pixels, &[
    ///     Pixel { r: 0, g: 1, b: 2, a: 3 },
    ///     Pixel { r: 4, g: 5, b: 6, a: 7 },
    /// ]);
    ///
    /// ```
    ///
    /// Since an explicit `count` is provided, this method supports types with
    /// zero-sized trailing slice elements. Methods such as [`ref_from_bytes`]
    /// which do not take an explicit count do not support such types.
    ///
    /// ```
    /// use zerocopy::*;
    /// # use zerocopy_derive::*;
    ///
    /// #[derive(FromBytes, Immutable, KnownLayout)]
    /// #[repr(C)]
    /// struct ZSTy {
    ///     leading_sized: [u8; 2],
    ///     trailing_dst: [()],
    /// }
    ///
    /// let src = &[85, 85][..];
    /// let zsty = ZSTy::ref_from_bytes_with_elems(src, 42).unwrap();
    /// assert_eq!(zsty.trailing_dst.len(), 42);
    /// ```
    ///
    /// [`ref_from_bytes`]: FromBytes::ref_from_bytes
    #[must_use = "has no side effects"]
    #[inline]
    fn ref_from_bytes_with_elems(
        source: &[u8],
        count: usize,
    ) -> Result<&Self, CastError<&[u8], Self>>
    where
        Self: KnownLayout<PointerMetadata = usize> + Immutable,
    {
        // Cast the shared byte pointer to a `Self` with exactly `count`
        // trailing elements, requiring that no bytes be left over.
        let source = Ptr::from_ref(source);
        let maybe_slf = source.try_cast_into_no_leftover::<_, BecauseImmutable>(Some(count));
        match maybe_slf {
            Ok(slf) => Ok(slf.recall_validity().as_ref()),
            // On failure, surface the original `&[u8]` in the error.
            Err(err) => Err(err.map_src(|s| s.as_ref())),
        }
    }
4306
    /// Interprets the prefix of the given `source` as a DST `&Self` with length
    /// equal to `count`.
    ///
    /// This method attempts to return a reference to the prefix of `source`
    /// interpreted as a `Self` with `count` trailing elements, and a reference
    /// to the remaining bytes. If there are insufficient bytes, or if `source`
    /// is not appropriately aligned, this returns `Err`. If [`Self:
    /// Unaligned`][self-unaligned], you can [infallibly discard the alignment
    /// error][size-error-from].
    ///
    /// [self-unaligned]: Unaligned
    /// [size-error-from]: error/struct.SizeError.html#method.from-1
    ///
    /// # Examples
    ///
    /// ```
    /// use zerocopy::FromBytes;
    /// # use zerocopy_derive::*;
    ///
    /// # #[derive(Debug, PartialEq, Eq)]
    /// #[derive(FromBytes, Immutable)]
    /// #[repr(C)]
    /// struct Pixel {
    ///     r: u8,
    ///     g: u8,
    ///     b: u8,
    ///     a: u8,
    /// }
    ///
    /// // These are more bytes than are needed to encode two `Pixel`s.
    /// let bytes = &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9][..];
    ///
    /// let (pixels, suffix) = <[Pixel]>::ref_from_prefix_with_elems(bytes, 2).unwrap();
    ///
    /// assert_eq!(pixels, &[
    ///     Pixel { r: 0, g: 1, b: 2, a: 3 },
    ///     Pixel { r: 4, g: 5, b: 6, a: 7 },
    /// ]);
    ///
    /// assert_eq!(suffix, &[8, 9]);
    /// ```
    ///
    /// Since an explicit `count` is provided, this method supports types with
    /// zero-sized trailing slice elements. Methods such as [`ref_from_prefix`]
    /// which do not take an explicit count do not support such types.
    ///
    /// ```
    /// use zerocopy::*;
    /// # use zerocopy_derive::*;
    ///
    /// #[derive(FromBytes, Immutable, KnownLayout)]
    /// #[repr(C)]
    /// struct ZSTy {
    ///     leading_sized: [u8; 2],
    ///     trailing_dst: [()],
    /// }
    ///
    /// let src = &[85, 85][..];
    /// let (zsty, _) = ZSTy::ref_from_prefix_with_elems(src, 42).unwrap();
    /// assert_eq!(zsty.trailing_dst.len(), 42);
    /// ```
    ///
    /// [`ref_from_prefix`]: FromBytes::ref_from_prefix
    #[must_use = "has no side effects"]
    #[inline]
    fn ref_from_prefix_with_elems(
        source: &[u8],
        count: usize,
    ) -> Result<(&Self, &[u8]), CastError<&[u8], Self>>
    where
        Self: KnownLayout<PointerMetadata = usize> + Immutable,
    {
        // `Some(count)` pins the trailing-element count; no ZST static assert
        // is needed because the size is fully determined by `count`.
        ref_from_prefix_suffix(source, Some(count), CastType::Prefix)
    }
4381
4382    /// Interprets the suffix of the given `source` as a DST `&Self` with length
4383    /// equal to `count`.
4384    ///
4385    /// This method attempts to return a reference to the suffix of `source`
4386    /// interpreted as a `Self` with `count` trailing elements, and a reference
4387    /// to the preceding bytes. If there are insufficient bytes, or if that
4388    /// suffix of `source` is not appropriately aligned, this returns `Err`. If
4389    /// [`Self: Unaligned`][self-unaligned], you can [infallibly discard the
4390    /// alignment error][size-error-from].
4391    ///
4392    /// [self-unaligned]: Unaligned
4393    /// [size-error-from]: error/struct.SizeError.html#method.from-1
4394    ///
4395    /// # Examples
4396    ///
4397    /// ```
4398    /// use zerocopy::FromBytes;
4399    /// # use zerocopy_derive::*;
4400    ///
4401    /// # #[derive(Debug, PartialEq, Eq)]
4402    /// #[derive(FromBytes, Immutable)]
4403    /// #[repr(C)]
4404    /// struct Pixel {
4405    ///     r: u8,
4406    ///     g: u8,
4407    ///     b: u8,
4408    ///     a: u8,
4409    /// }
4410    ///
4411    /// // These are more bytes than are needed to encode two `Pixel`s.
4412    /// let bytes = &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9][..];
4413    ///
4414    /// let (prefix, pixels) = <[Pixel]>::ref_from_suffix_with_elems(bytes, 2).unwrap();
4415    ///
4416    /// assert_eq!(prefix, &[0, 1]);
4417    ///
4418    /// assert_eq!(pixels, &[
4419    ///     Pixel { r: 2, g: 3, b: 4, a: 5 },
4420    ///     Pixel { r: 6, g: 7, b: 8, a: 9 },
4421    /// ]);
4422    /// ```
4423    ///
4424    /// Since an explicit `count` is provided, this method supports types with
4425    /// zero-sized trailing slice elements. Methods such as [`ref_from_suffix`]
4426    /// which do not take an explicit count do not support such types.
4427    ///
4428    /// ```
4429    /// use zerocopy::*;
4430    /// # use zerocopy_derive::*;
4431    ///
4432    /// #[derive(FromBytes, Immutable, KnownLayout)]
4433    /// #[repr(C)]
4434    /// struct ZSTy {
4435    ///     leading_sized: [u8; 2],
4436    ///     trailing_dst: [()],
4437    /// }
4438    ///
4439    /// let src = &[85, 85][..];
4440    /// let (_, zsty) = ZSTy::ref_from_suffix_with_elems(src, 42).unwrap();
4441    /// assert_eq!(zsty.trailing_dst.len(), 42);
4442    /// ```
4443    ///
4444    /// [`ref_from_suffix`]: FromBytes::ref_from_suffix
4445    #[must_use = "has no side effects"]
4446    #[inline]
4447    fn ref_from_suffix_with_elems(
4448        source: &[u8],
4449        count: usize,
4450    ) -> Result<(&[u8], &Self), CastError<&[u8], Self>>
4451    where
4452        Self: KnownLayout<PointerMetadata = usize> + Immutable,
4453    {
4454        ref_from_prefix_suffix(source, Some(count), CastType::Suffix).map(swap)
4455    }
4456
    /// Interprets the given `source` as a `&mut Self` with a DST length equal
    /// to `count`.
    ///
    /// This method attempts to return a reference to `source` interpreted as a
    /// `Self` with `count` trailing elements. If the length of `source` is not
    /// equal to the size of `Self` with `count` elements, or if `source` is not
    /// appropriately aligned, this returns `Err`. If [`Self:
    /// Unaligned`][self-unaligned], you can [infallibly discard the alignment
    /// error][size-error-from].
    ///
    /// [self-unaligned]: Unaligned
    /// [size-error-from]: error/struct.SizeError.html#method.from-1
    ///
    /// # Examples
    ///
    /// ```
    /// use zerocopy::FromBytes;
    /// # use zerocopy_derive::*;
    ///
    /// # #[derive(Debug, PartialEq, Eq)]
    /// #[derive(KnownLayout, FromBytes, IntoBytes, Immutable)]
    /// #[repr(C)]
    /// struct Pixel {
    ///     r: u8,
    ///     g: u8,
    ///     b: u8,
    ///     a: u8,
    /// }
    ///
    /// let bytes = &mut [0, 1, 2, 3, 4, 5, 6, 7][..];
    ///
    /// let pixels = <[Pixel]>::mut_from_bytes_with_elems(bytes, 2).unwrap();
    ///
    /// assert_eq!(pixels, &[
    ///     Pixel { r: 0, g: 1, b: 2, a: 3 },
    ///     Pixel { r: 4, g: 5, b: 6, a: 7 },
    /// ]);
    ///
    /// pixels[1] = Pixel { r: 0, g: 0, b: 0, a: 0 };
    ///
    /// assert_eq!(bytes, [0, 1, 2, 3, 0, 0, 0, 0]);
    /// ```
    ///
    /// Since an explicit `count` is provided, this method supports types with
    /// zero-sized trailing slice elements. Methods such as [`mut_from_bytes`]
    /// which do not take an explicit count do not support such types.
    ///
    /// ```
    /// use zerocopy::*;
    /// # use zerocopy_derive::*;
    ///
    /// #[derive(FromBytes, IntoBytes, Immutable, KnownLayout)]
    /// #[repr(C, packed)]
    /// struct ZSTy {
    ///     leading_sized: [u8; 2],
    ///     trailing_dst: [()],
    /// }
    ///
    /// let src = &mut [85, 85][..];
    /// let zsty = ZSTy::mut_from_bytes_with_elems(src, 42).unwrap();
    /// assert_eq!(zsty.trailing_dst.len(), 42);
    /// ```
    ///
    /// [`mut_from_bytes`]: FromBytes::mut_from_bytes
    #[must_use = "has no side effects"]
    #[inline]
    fn mut_from_bytes_with_elems(
        source: &mut [u8],
        count: usize,
    ) -> Result<&mut Self, CastError<&mut [u8], Self>>
    where
        Self: IntoBytes + KnownLayout<PointerMetadata = usize> + Immutable,
    {
        // Cast the exclusive byte pointer to a `Self` with exactly `count`
        // trailing elements, requiring that no bytes be left over.
        //
        // NOTE(review): the cast itself is justified via `BecauseImmutable`
        // (hence the `Immutable` bound) while the validity recall supplies
        // `BecauseExclusive` — this split is intentional internal `Ptr`
        // machinery; confirm against the `Ptr` invariant docs before changing.
        let source = Ptr::from_mut(source);
        let maybe_slf = source.try_cast_into_no_leftover::<_, BecauseImmutable>(Some(count));
        match maybe_slf {
            Ok(slf) => Ok(slf.recall_validity::<_, (_, (_, BecauseExclusive))>().as_mut()),
            // On failure, surface the original `&mut [u8]` in the error.
            Err(err) => Err(err.map_src(|s| s.as_mut())),
        }
    }
4537
    /// Interprets the prefix of the given `source` as a `&mut Self` with DST
    /// length equal to `count`.
    ///
    /// This method attempts to return a reference to the prefix of `source`
    /// interpreted as a `Self` with `count` trailing elements, and a reference
    /// to the remaining bytes. If there are insufficient bytes, or if `source`
    /// is not appropriately aligned, this returns `Err`. If [`Self:
    /// Unaligned`][self-unaligned], you can [infallibly discard the alignment
    /// error][size-error-from].
    ///
    /// [self-unaligned]: Unaligned
    /// [size-error-from]: error/struct.SizeError.html#method.from-1
    ///
    /// # Examples
    ///
    /// ```
    /// use zerocopy::FromBytes;
    /// # use zerocopy_derive::*;
    ///
    /// # #[derive(Debug, PartialEq, Eq)]
    /// #[derive(KnownLayout, FromBytes, IntoBytes, Immutable)]
    /// #[repr(C)]
    /// struct Pixel {
    ///     r: u8,
    ///     g: u8,
    ///     b: u8,
    ///     a: u8,
    /// }
    ///
    /// // These are more bytes than are needed to encode two `Pixel`s.
    /// let bytes = &mut [0, 1, 2, 3, 4, 5, 6, 7, 8, 9][..];
    ///
    /// let (pixels, suffix) = <[Pixel]>::mut_from_prefix_with_elems(bytes, 2).unwrap();
    ///
    /// assert_eq!(pixels, &[
    ///     Pixel { r: 0, g: 1, b: 2, a: 3 },
    ///     Pixel { r: 4, g: 5, b: 6, a: 7 },
    /// ]);
    ///
    /// assert_eq!(suffix, &[8, 9]);
    ///
    /// pixels[1] = Pixel { r: 0, g: 0, b: 0, a: 0 };
    /// suffix.fill(1);
    ///
    /// assert_eq!(bytes, [0, 1, 2, 3, 0, 0, 0, 0, 1, 1]);
    /// ```
    ///
    /// Since an explicit `count` is provided, this method supports types with
    /// zero-sized trailing slice elements. Methods such as [`mut_from_prefix`]
    /// which do not take an explicit count do not support such types.
    ///
    /// ```
    /// use zerocopy::*;
    /// # use zerocopy_derive::*;
    ///
    /// #[derive(FromBytes, IntoBytes, Immutable, KnownLayout)]
    /// #[repr(C, packed)]
    /// struct ZSTy {
    ///     leading_sized: [u8; 2],
    ///     trailing_dst: [()],
    /// }
    ///
    /// let src = &mut [85, 85][..];
    /// let (zsty, _) = ZSTy::mut_from_prefix_with_elems(src, 42).unwrap();
    /// assert_eq!(zsty.trailing_dst.len(), 42);
    /// ```
    ///
    /// [`mut_from_prefix`]: FromBytes::mut_from_prefix
    #[must_use = "has no side effects"]
    #[inline]
    fn mut_from_prefix_with_elems(
        source: &mut [u8],
        count: usize,
    ) -> Result<(&mut Self, &mut [u8]), CastError<&mut [u8], Self>>
    where
        Self: IntoBytes + KnownLayout<PointerMetadata = usize>,
    {
        // `Some(count)` pins the trailing-element count; no ZST static assert
        // is needed because the size is fully determined by `count`.
        mut_from_prefix_suffix(source, Some(count), CastType::Prefix)
    }
4617
    /// Interprets the suffix of the given `source` as a `&mut Self` with DST
    /// length equal to `count`.
    ///
    /// This method attempts to return a reference to the suffix of `source`
    /// interpreted as a `Self` with `count` trailing elements, and a reference
    /// to the preceding bytes. If there are insufficient bytes, or if that
    /// suffix of `source` is not appropriately aligned, this returns `Err`. If
    /// [`Self: Unaligned`][self-unaligned], you can [infallibly discard the
    /// alignment error][size-error-from].
    ///
    /// [self-unaligned]: Unaligned
    /// [size-error-from]: error/struct.SizeError.html#method.from-1
    ///
    /// # Examples
    ///
    /// ```
    /// use zerocopy::FromBytes;
    /// # use zerocopy_derive::*;
    ///
    /// # #[derive(Debug, PartialEq, Eq)]
    /// #[derive(FromBytes, IntoBytes, Immutable)]
    /// #[repr(C)]
    /// struct Pixel {
    ///     r: u8,
    ///     g: u8,
    ///     b: u8,
    ///     a: u8,
    /// }
    ///
    /// // These are more bytes than are needed to encode two `Pixel`s.
    /// let bytes = &mut [0, 1, 2, 3, 4, 5, 6, 7, 8, 9][..];
    ///
    /// let (prefix, pixels) = <[Pixel]>::mut_from_suffix_with_elems(bytes, 2).unwrap();
    ///
    /// assert_eq!(prefix, &[0, 1]);
    ///
    /// assert_eq!(pixels, &[
    ///     Pixel { r: 2, g: 3, b: 4, a: 5 },
    ///     Pixel { r: 6, g: 7, b: 8, a: 9 },
    /// ]);
    ///
    /// prefix.fill(9);
    /// pixels[1] = Pixel { r: 0, g: 0, b: 0, a: 0 };
    ///
    /// assert_eq!(bytes, [9, 9, 2, 3, 4, 5, 0, 0, 0, 0]);
    /// ```
    ///
    /// Since an explicit `count` is provided, this method supports types with
    /// zero-sized trailing slice elements. Methods such as [`mut_from_suffix`]
    /// which do not take an explicit count do not support such types.
    ///
    /// ```
    /// use zerocopy::*;
    /// # use zerocopy_derive::*;
    ///
    /// #[derive(FromBytes, IntoBytes, Immutable, KnownLayout)]
    /// #[repr(C, packed)]
    /// struct ZSTy {
    ///     leading_sized: [u8; 2],
    ///     trailing_dst: [()],
    /// }
    ///
    /// let src = &mut [85, 85][..];
    /// let (_, zsty) = ZSTy::mut_from_suffix_with_elems(src, 42).unwrap();
    /// assert_eq!(zsty.trailing_dst.len(), 42);
    /// ```
    ///
    /// [`mut_from_suffix`]: FromBytes::mut_from_suffix
    #[must_use = "has no side effects"]
    #[inline]
    fn mut_from_suffix_with_elems(
        source: &mut [u8],
        count: usize,
    ) -> Result<(&mut [u8], &mut Self), CastError<&mut [u8], Self>>
    where
        Self: IntoBytes + KnownLayout<PointerMetadata = usize>,
    {
        // `Some(count)` pins the trailing-element count; `swap` reorders the
        // helper's `(&mut Self, leftover)` into this method's return order.
        mut_from_prefix_suffix(source, Some(count), CastType::Suffix).map(swap)
    }
4697
    /// Reads a copy of `Self` from the given `source`.
    ///
    /// If `source.len() != size_of::<Self>()`, `read_from_bytes` returns `Err`.
    ///
    /// # Examples
    ///
    /// ```
    /// use zerocopy::FromBytes;
    /// # use zerocopy_derive::*;
    ///
    /// #[derive(FromBytes)]
    /// #[repr(C)]
    /// struct PacketHeader {
    ///     src_port: [u8; 2],
    ///     dst_port: [u8; 2],
    ///     length: [u8; 2],
    ///     checksum: [u8; 2],
    /// }
    ///
    /// // These bytes encode a `PacketHeader`.
    /// let bytes = &[0, 1, 2, 3, 4, 5, 6, 7][..];
    ///
    /// let header = PacketHeader::read_from_bytes(bytes).unwrap();
    ///
    /// assert_eq!(header.src_port, [0, 1]);
    /// assert_eq!(header.dst_port, [2, 3]);
    /// assert_eq!(header.length, [4, 5]);
    /// assert_eq!(header.checksum, [6, 7]);
    /// ```
    #[must_use = "has no side effects"]
    #[inline]
    fn read_from_bytes(source: &[u8]) -> Result<Self, SizeError<&[u8], Self>>
    where
        Self: Sized,
    {
        // Casting to `Unalign<Self>` rather than to `Self` directly removes
        // `Self`'s alignment requirement (`Unalign` is trivially aligned), so
        // the only remaining failure mode is a size mismatch.
        match Ref::<_, Unalign<Self>>::sized_from(source) {
            // Copy the (possibly unaligned) value out of the source bytes and
            // unwrap it from its `Unalign` shell.
            Ok(r) => Ok(Ref::read(&r).into_inner()),
            // Re-brand the error's destination type from `Unalign<Self>` to
            // the `Self` expected by our return type.
            Err(CastError::Size(e)) => Err(e.with_dst()),
            Err(CastError::Alignment(_)) => {
                // SAFETY: `Unalign<Self>` is trivially aligned, so
                // `Ref::sized_from` cannot fail due to unmet alignment
                // requirements.
                unsafe { core::hint::unreachable_unchecked() }
            }
            // `i`'s type is uninhabited — the empty match proves to the
            // compiler that this arm can never be reached at runtime.
            Err(CastError::Validity(i)) => match i {},
        }
    }
4745
    /// Reads a copy of `Self` from the prefix of the given `source`.
    ///
    /// This attempts to read a `Self` from the first `size_of::<Self>()` bytes
    /// of `source`, returning that `Self` and any remaining bytes. If
    /// `source.len() < size_of::<Self>()`, it returns `Err`.
    ///
    /// # Examples
    ///
    /// ```
    /// use zerocopy::FromBytes;
    /// # use zerocopy_derive::*;
    ///
    /// #[derive(FromBytes)]
    /// #[repr(C)]
    /// struct PacketHeader {
    ///     src_port: [u8; 2],
    ///     dst_port: [u8; 2],
    ///     length: [u8; 2],
    ///     checksum: [u8; 2],
    /// }
    ///
    /// // These are more bytes than are needed to encode a `PacketHeader`.
    /// let bytes = &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9][..];
    ///
    /// let (header, body) = PacketHeader::read_from_prefix(bytes).unwrap();
    ///
    /// assert_eq!(header.src_port, [0, 1]);
    /// assert_eq!(header.dst_port, [2, 3]);
    /// assert_eq!(header.length, [4, 5]);
    /// assert_eq!(header.checksum, [6, 7]);
    /// assert_eq!(body, [8, 9]);
    /// ```
    #[must_use = "has no side effects"]
    #[inline]
    fn read_from_prefix(source: &[u8]) -> Result<(Self, &[u8]), SizeError<&[u8], Self>>
    where
        Self: Sized,
    {
        // Casting to `Unalign<Self>` rather than to `Self` directly removes
        // `Self`'s alignment requirement (`Unalign` is trivially aligned), so
        // the only remaining failure mode is a size mismatch.
        match Ref::<_, Unalign<Self>>::sized_from_prefix(source) {
            // Copy the (possibly unaligned) value out of the prefix, unwrap
            // it from its `Unalign` shell, and hand back the leftover bytes.
            Ok((r, suffix)) => Ok((Ref::read(&r).into_inner(), suffix)),
            // Re-brand the error's destination type from `Unalign<Self>` to
            // the `Self` expected by our return type.
            Err(CastError::Size(e)) => Err(e.with_dst()),
            Err(CastError::Alignment(_)) => {
                // SAFETY: `Unalign<Self>` is trivially aligned, so
                // `Ref::sized_from_prefix` cannot fail due to unmet alignment
                // requirements.
                unsafe { core::hint::unreachable_unchecked() }
            }
            // `i`'s type is uninhabited — the empty match proves to the
            // compiler that this arm can never be reached at runtime.
            Err(CastError::Validity(i)) => match i {},
        }
    }
4796
    /// Reads a copy of `Self` from the suffix of the given `source`.
    ///
    /// This attempts to read a `Self` from the last `size_of::<Self>()` bytes
    /// of `source`, returning that `Self` and any preceding bytes. If
    /// `source.len() < size_of::<Self>()`, it returns `Err`.
    ///
    /// # Examples
    ///
    /// ```
    /// use zerocopy::FromBytes;
    /// # use zerocopy_derive::*;
    ///
    /// #[derive(FromBytes)]
    /// #[repr(C)]
    /// struct PacketTrailer {
    ///     frame_check_sequence: [u8; 4],
    /// }
    ///
    /// // These are more bytes than are needed to encode a `PacketTrailer`.
    /// let bytes = &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9][..];
    ///
    /// let (prefix, trailer) = PacketTrailer::read_from_suffix(bytes).unwrap();
    ///
    /// assert_eq!(prefix, [0, 1, 2, 3, 4, 5]);
    /// assert_eq!(trailer.frame_check_sequence, [6, 7, 8, 9]);
    /// ```
    #[must_use = "has no side effects"]
    #[inline]
    fn read_from_suffix(source: &[u8]) -> Result<(&[u8], Self), SizeError<&[u8], Self>>
    where
        Self: Sized,
    {
        // Casting to `Unalign<Self>` rather than to `Self` directly removes
        // `Self`'s alignment requirement (`Unalign` is trivially aligned), so
        // the only remaining failure mode is a size mismatch.
        match Ref::<_, Unalign<Self>>::sized_from_suffix(source) {
            // Copy the (possibly unaligned) value out of the suffix, unwrap
            // it from its `Unalign` shell, and hand back the preceding bytes.
            Ok((prefix, r)) => Ok((prefix, Ref::read(&r).into_inner())),
            // Re-brand the error's destination type from `Unalign<Self>` to
            // the `Self` expected by our return type.
            Err(CastError::Size(e)) => Err(e.with_dst()),
            Err(CastError::Alignment(_)) => {
                // SAFETY: `Unalign<Self>` is trivially aligned, so
                // `Ref::sized_from_suffix` cannot fail due to unmet alignment
                // requirements.
                unsafe { core::hint::unreachable_unchecked() }
            }
            // `i`'s type is uninhabited — the empty match proves to the
            // compiler that this arm can never be reached at runtime.
            Err(CastError::Validity(i)) => match i {},
        }
    }
4841
    /// Reads a copy of `self` from an `io::Read`.
    ///
    /// This is useful for interfacing with operating system byte sources
    /// (files, sockets, etc.).
    ///
    /// # Examples
    ///
    /// ```no_run
    /// use zerocopy::{byteorder::big_endian::*, FromBytes};
    /// use std::fs::File;
    /// # use zerocopy_derive::*;
    ///
    /// #[derive(FromBytes)]
    /// #[repr(C)]
    /// struct BitmapFileHeader {
    ///     signature: [u8; 2],
    ///     size: U32,
    ///     reserved: U64,
    ///     offset: U64,
    /// }
    ///
    /// let mut file = File::open("image.bin").unwrap();
    /// let header = BitmapFileHeader::read_from_io(&mut file).unwrap();
    /// ```
    #[cfg(feature = "std")]
    #[cfg_attr(doc_cfg, doc(cfg(feature = "std")))]
    #[inline(always)]
    fn read_from_io<R>(mut src: R) -> io::Result<Self>
    where
        Self: Sized,
        R: io::Read,
    {
        // NOTE(#2319, #2320): We do `buf.zero()` separately rather than
        // constructing `let buf = CoreMaybeUninit::zeroed()` because, if `Self`
        // contains padding bytes, then a typed copy of `CoreMaybeUninit<Self>`
        // will not necessarily preserve zeros written to those padding byte
        // locations, and so `buf` could contain uninitialized bytes.
        let mut buf = CoreMaybeUninit::<Self>::uninit();
        buf.zero();

        let ptr = Ptr::from_mut(&mut buf);
        // SAFETY: After `buf.zero()`, `buf` consists entirely of initialized,
        // zeroed bytes. Since `MaybeUninit` has no validity requirements, `ptr`
        // cannot be used to write values which will violate `buf`'s bit
        // validity. Since `ptr` has `Exclusive` aliasing, nothing other than
        // `ptr` may be used to mutate `ptr`'s referent, and so its bit validity
        // cannot be violated even though `buf` may have more permissive bit
        // validity than `ptr`.
        let ptr = unsafe { ptr.assume_validity::<invariant::Initialized>() };
        let ptr = ptr.as_bytes();
        // Fill the zeroed buffer from the reader; `?` propagates any I/O
        // error before `buf` is ever assumed to be initialized below.
        src.read_exact(ptr.as_mut())?;
        // SAFETY: `buf` entirely consists of initialized bytes, and `Self` is
        // `FromBytes`.
        Ok(unsafe { buf.assume_init() })
    }
4897
4898    #[deprecated(since = "0.8.0", note = "renamed to `FromBytes::ref_from_bytes`")]
4899    #[doc(hidden)]
4900    #[must_use = "has no side effects"]
4901    #[inline(always)]
4902    fn ref_from(source: &[u8]) -> Option<&Self>
4903    where
4904        Self: KnownLayout + Immutable,
4905    {
4906        Self::ref_from_bytes(source).ok()
4907    }
4908
4909    #[deprecated(since = "0.8.0", note = "renamed to `FromBytes::mut_from_bytes`")]
4910    #[doc(hidden)]
4911    #[must_use = "has no side effects"]
4912    #[inline(always)]
4913    fn mut_from(source: &mut [u8]) -> Option<&mut Self>
4914    where
4915        Self: KnownLayout + IntoBytes,
4916    {
4917        Self::mut_from_bytes(source).ok()
4918    }
4919
4920    #[deprecated(since = "0.8.0", note = "renamed to `FromBytes::ref_from_prefix_with_elems`")]
4921    #[doc(hidden)]
4922    #[must_use = "has no side effects"]
4923    #[inline(always)]
4924    fn slice_from_prefix(source: &[u8], count: usize) -> Option<(&[Self], &[u8])>
4925    where
4926        Self: Sized + Immutable,
4927    {
4928        <[Self]>::ref_from_prefix_with_elems(source, count).ok()
4929    }
4930
4931    #[deprecated(since = "0.8.0", note = "renamed to `FromBytes::ref_from_suffix_with_elems`")]
4932    #[doc(hidden)]
4933    #[must_use = "has no side effects"]
4934    #[inline(always)]
4935    fn slice_from_suffix(source: &[u8], count: usize) -> Option<(&[u8], &[Self])>
4936    where
4937        Self: Sized + Immutable,
4938    {
4939        <[Self]>::ref_from_suffix_with_elems(source, count).ok()
4940    }
4941
4942    #[deprecated(since = "0.8.0", note = "renamed to `FromBytes::mut_from_prefix_with_elems`")]
4943    #[doc(hidden)]
4944    #[must_use = "has no side effects"]
4945    #[inline(always)]
4946    fn mut_slice_from_prefix(source: &mut [u8], count: usize) -> Option<(&mut [Self], &mut [u8])>
4947    where
4948        Self: Sized + IntoBytes,
4949    {
4950        <[Self]>::mut_from_prefix_with_elems(source, count).ok()
4951    }
4952
4953    #[deprecated(since = "0.8.0", note = "renamed to `FromBytes::mut_from_suffix_with_elems`")]
4954    #[doc(hidden)]
4955    #[must_use = "has no side effects"]
4956    #[inline(always)]
4957    fn mut_slice_from_suffix(source: &mut [u8], count: usize) -> Option<(&mut [u8], &mut [Self])>
4958    where
4959        Self: Sized + IntoBytes,
4960    {
4961        <[Self]>::mut_from_suffix_with_elems(source, count).ok()
4962    }
4963
4964    #[deprecated(since = "0.8.0", note = "renamed to `FromBytes::read_from_bytes`")]
4965    #[doc(hidden)]
4966    #[must_use = "has no side effects"]
4967    #[inline(always)]
4968    fn read_from(source: &[u8]) -> Option<Self>
4969    where
4970        Self: Sized,
4971    {
4972        Self::read_from_bytes(source).ok()
4973    }
4974}
4975
/// Interprets the given affix of the given bytes as a `&Self`.
///
/// This method computes the largest possible size of `Self` that can fit in the
/// prefix or suffix bytes of `source`, then attempts to return both a reference
/// to those bytes interpreted as a `Self`, and a reference to the excess bytes.
/// If there are insufficient bytes, or if that affix of `source` is not
/// appropriately aligned, this returns `Err`.
#[inline(always)]
fn ref_from_prefix_suffix<T: FromBytes + KnownLayout + Immutable + ?Sized>(
    source: &[u8],
    meta: Option<T::PointerMetadata>,
    cast_type: CastType,
) -> Result<(&T, &[u8]), CastError<&[u8], T>> {
    // Split `source` into a `T`-typed affix (`slf`) and the leftover bytes.
    // On failure, map the error's source pointer back to a plain `&[u8]` so
    // the caller's input is returned inside the error.
    let (slf, prefix_suffix) = Ptr::from_ref(source)
        .try_cast_into::<_, BecauseImmutable>(cast_type, meta)
        .map_err(|err| err.map_src(|s| s.as_ref()))?;
    // Convert the cast pointer's validity invariant to the one `as_ref`
    // requires (sound because `T: FromBytes`), then materialize references.
    Ok((slf.recall_validity().as_ref(), prefix_suffix.as_ref()))
}
4994
/// Interprets the given affix of the given bytes as a `&mut Self` without
/// copying.
///
/// This method computes the largest possible size of `Self` that can fit in the
/// prefix or suffix bytes of `source`, then attempts to return both a reference
/// to those bytes interpreted as a `Self`, and a reference to the excess bytes.
/// If there are insufficient bytes, or if that affix of `source` is not
/// appropriately aligned, this returns `Err`.
#[inline(always)]
fn mut_from_prefix_suffix<T: FromBytes + IntoBytes + KnownLayout + ?Sized>(
    source: &mut [u8],
    meta: Option<T::PointerMetadata>,
    cast_type: CastType,
) -> Result<(&mut T, &mut [u8]), CastError<&mut [u8], T>> {
    // Split `source` into a `T`-typed affix (`slf`) and the leftover bytes.
    // On failure, map the error's source pointer back to a plain `&mut [u8]`
    // so the caller's buffer is returned inside the error.
    let (slf, prefix_suffix) = Ptr::from_mut(source)
        .try_cast_into::<_, BecauseExclusive>(cast_type, meta)
        .map_err(|err| err.map_src(|s| s.as_mut()))?;
    // NOTE(review): the `(_, (_, _))` turbofish appears to name the chain of
    // proofs by which `recall_validity` re-establishes `T`'s bit validity for
    // this exclusive pointer — confirm against `recall_validity`'s definition.
    Ok((slf.recall_validity::<_, (_, (_, _))>().as_mut(), prefix_suffix.as_mut()))
}
5014
5015/// Analyzes whether a type is [`IntoBytes`].
5016///
5017/// This derive analyzes, at compile time, whether the annotated type satisfies
5018/// the [safety conditions] of `IntoBytes` and implements `IntoBytes` if it is
5019/// sound to do so. This derive can be applied to structs and enums (see below
5020/// for union support); e.g.:
5021///
5022/// ```
5023/// # use zerocopy_derive::{IntoBytes};
5024/// #[derive(IntoBytes)]
5025/// #[repr(C)]
5026/// struct MyStruct {
5027/// # /*
5028///     ...
5029/// # */
5030/// }
5031///
5032/// #[derive(IntoBytes)]
5033/// #[repr(u8)]
5034/// enum MyEnum {
5035/// #   Variant,
5036/// # /*
5037///     ...
5038/// # */
5039/// }
5040/// ```
5041///
5042/// [safety conditions]: trait@IntoBytes#safety
5043///
5044/// # Error Messages
5045///
5046/// On Rust toolchains prior to 1.78.0, due to the way that the custom derive
5047/// for `IntoBytes` is implemented, you may get an error like this:
5048///
5049/// ```text
5050/// error[E0277]: the trait bound `(): PaddingFree<Foo, true>` is not satisfied
5051///   --> lib.rs:23:10
5052///    |
5053///  1 | #[derive(IntoBytes)]
5054///    |          ^^^^^^^^^ the trait `PaddingFree<Foo, true>` is not implemented for `()`
5055///    |
5056///    = help: the following implementations were found:
5057///                   <() as PaddingFree<T, false>>
5058/// ```
5059///
5060/// This error indicates that the type being annotated has padding bytes, which
5061/// is illegal for `IntoBytes` types. Consider reducing the alignment of some
5062/// fields by using types in the [`byteorder`] module, wrapping field types in
5063/// [`Unalign`], adding explicit struct fields where those padding bytes would
5064/// be, or using `#[repr(packed)]`. See the Rust Reference's page on [type
5065/// layout] for more information about type layout and padding.
5066///
5067/// [type layout]: https://doc.rust-lang.org/reference/type-layout.html
5068///
5069/// # Unions
5070///
5071/// Currently, union bit validity is [up in the air][union-validity], and so
5072/// zerocopy does not support `#[derive(IntoBytes)]` on unions by default.
5073/// However, implementing `IntoBytes` on a union type is likely sound on all
5074/// existing Rust toolchains - it's just that it may become unsound in the
5075/// future. You can opt-in to `#[derive(IntoBytes)]` support on unions by
5076/// passing the unstable `zerocopy_derive_union_into_bytes` cfg:
5077///
5078/// ```shell
5079/// $ RUSTFLAGS='--cfg zerocopy_derive_union_into_bytes' cargo build
5080/// ```
5081///
5082/// However, it is your responsibility to ensure that this derive is sound on
5083/// the specific versions of the Rust toolchain you are using! We make no
5084/// stability or soundness guarantees regarding this cfg, and may remove it at
5085/// any point.
5086///
5087/// We are actively working with Rust to stabilize the necessary language
5088/// guarantees to support this in a forwards-compatible way, which will enable
5089/// us to remove the cfg gate. As part of this effort, we need to know how much
5090/// demand there is for this feature. If you would like to use `IntoBytes` on
5091/// unions, [please let us know][discussion].
5092///
5093/// [union-validity]: https://github.com/rust-lang/unsafe-code-guidelines/issues/438
5094/// [discussion]: https://github.com/google/zerocopy/discussions/1802
5095///
5096/// # Analysis
5097///
5098/// *This section describes, roughly, the analysis performed by this derive to
5099/// determine whether it is sound to implement `IntoBytes` for a given type.
5100/// Unless you are modifying the implementation of this derive, or attempting to
5101/// manually implement `IntoBytes` for a type yourself, you don't need to read
5102/// this section.*
5103///
5104/// If a type has the following properties, then this derive can implement
5105/// `IntoBytes` for that type:
5106///
5107/// - If the type is a struct, its fields must be [`IntoBytes`]. Additionally:
5108///     - if the type is `repr(transparent)` or `repr(packed)`, it is
5109///       [`IntoBytes`] if its fields are [`IntoBytes`]; else,
5110///     - if the type is `repr(C)` with at most one field, it is [`IntoBytes`]
5111///       if its field is [`IntoBytes`]; else,
5112///     - if the type has no generic parameters, it is [`IntoBytes`] if the type
5113///       is sized and has no padding bytes; else,
5114///     - if the type is `repr(C)`, its fields must be [`Unaligned`].
5115/// - If the type is an enum:
5116///   - It must have a defined representation (`repr`s `C`, `u8`, `u16`, `u32`,
5117///     `u64`, `usize`, `i8`, `i16`, `i32`, `i64`, or `isize`).
5118///   - It must have no padding bytes.
5119///   - Its fields must be [`IntoBytes`].
5120///
/// This analysis is subject to change. Unsafe code may *only* rely on the
/// documented [safety conditions] of `IntoBytes`, and must *not* rely on the
/// implementation details of this derive.
5124///
5125/// [Rust Reference]: https://doc.rust-lang.org/reference/type-layout.html
// Re-export the custom derive so `#[derive(IntoBytes)]` is usable with only
// the `zerocopy` crate in scope. Gated on the "derive" feature (and enabled
// unconditionally for this crate's own tests).
#[cfg(any(feature = "derive", test))]
#[cfg_attr(doc_cfg, doc(cfg(feature = "derive")))]
pub use zerocopy_derive::IntoBytes;
5129
5130/// Types that can be converted to an immutable slice of initialized bytes.
5131///
5132/// Any `IntoBytes` type can be converted to a slice of initialized bytes of the
5133/// same size. This is useful for efficiently serializing structured data as raw
5134/// bytes.
5135///
5136/// # Implementation
5137///
5138/// **Do not implement this trait yourself!** Instead, use
5139/// [`#[derive(IntoBytes)]`][derive]; e.g.:
5140///
5141/// ```
5142/// # use zerocopy_derive::IntoBytes;
5143/// #[derive(IntoBytes)]
5144/// #[repr(C)]
5145/// struct MyStruct {
5146/// # /*
5147///     ...
5148/// # */
5149/// }
5150///
5151/// #[derive(IntoBytes)]
5152/// #[repr(u8)]
5153/// enum MyEnum {
5154/// #   Variant0,
5155/// # /*
5156///     ...
5157/// # */
5158/// }
5159/// ```
5160///
5161/// This derive performs a sophisticated, compile-time safety analysis to
5162/// determine whether a type is `IntoBytes`. See the [derive
5163/// documentation][derive] for guidance on how to interpret error messages
5164/// produced by the derive's analysis.
5165///
5166/// # Safety
5167///
5168/// *This section describes what is required in order for `T: IntoBytes`, and
5169/// what unsafe code may assume of such types. If you don't plan on implementing
5170/// `IntoBytes` manually, and you don't plan on writing unsafe code that
5171/// operates on `IntoBytes` types, then you don't need to read this section.*
5172///
5173/// If `T: IntoBytes`, then unsafe code may assume that it is sound to treat any
5174/// `t: T` as an immutable `[u8]` of length `size_of_val(t)`. If a type is
5175/// marked as `IntoBytes` which violates this contract, it may cause undefined
5176/// behavior.
5177///
5178/// `#[derive(IntoBytes)]` only permits [types which satisfy these
5179/// requirements][derive-analysis].
5180///
5181#[cfg_attr(
5182    feature = "derive",
5183    doc = "[derive]: zerocopy_derive::IntoBytes",
5184    doc = "[derive-analysis]: zerocopy_derive::IntoBytes#analysis"
5185)]
5186#[cfg_attr(
5187    not(feature = "derive"),
5188    doc = concat!("[derive]: https://docs.rs/zerocopy/", env!("CARGO_PKG_VERSION"), "/zerocopy/derive.IntoBytes.html"),
5189    doc = concat!("[derive-analysis]: https://docs.rs/zerocopy/", env!("CARGO_PKG_VERSION"), "/zerocopy/derive.IntoBytes.html#analysis"),
5190)]
5191#[cfg_attr(
5192    not(no_zerocopy_diagnostic_on_unimplemented_1_78_0),
5193    diagnostic::on_unimplemented(note = "Consider adding `#[derive(IntoBytes)]` to `{Self}`")
5194)]
5195pub unsafe trait IntoBytes {
    // The `Self: Sized` bound makes it so that this function doesn't prevent
    // `IntoBytes` from being object safe. Note that other `IntoBytes` methods
    // prevent object safety, but those provide a benefit in exchange for object
    // safety. If at some point we remove those methods, change their type
    // signatures, or move them out of this trait so that `IntoBytes` is object
    // safe again, it's important that this function not prevent object safety.
    //
    // Because this item is hidden and has no default body, hand-written impls
    // of `IntoBytes` would have to discover and define it — steering users
    // toward `#[derive(IntoBytes)]` as the trait documentation instructs.
    #[doc(hidden)]
    fn only_derive_is_allowed_to_implement_this_trait()
    where
        Self: Sized;
5206
    /// Gets the bytes of this value.
    ///
    /// # Examples
    ///
    /// ```
    /// use zerocopy::IntoBytes;
    /// # use zerocopy_derive::*;
    ///
    /// #[derive(IntoBytes, Immutable)]
    /// #[repr(C)]
    /// struct PacketHeader {
    ///     src_port: [u8; 2],
    ///     dst_port: [u8; 2],
    ///     length: [u8; 2],
    ///     checksum: [u8; 2],
    /// }
    ///
    /// let header = PacketHeader {
    ///     src_port: [0, 1],
    ///     dst_port: [2, 3],
    ///     length: [4, 5],
    ///     checksum: [6, 7],
    /// };
    ///
    /// let bytes = header.as_bytes();
    ///
    /// assert_eq!(bytes, [0, 1, 2, 3, 4, 5, 6, 7]);
    /// ```
    #[must_use = "has no side effects"]
    #[inline(always)]
    fn as_bytes(&self) -> &[u8]
    where
        Self: Immutable,
    {
        // Note that this method does not have a `Self: Sized` bound;
        // `size_of_val` works for unsized values too.
        let len = mem::size_of_val(self);
        // Coercion (not a cast) from `&Self` to a raw pointer; for unsized
        // `Self` this preserves the pointer's metadata.
        let slf: *const Self = self;

        // SAFETY:
        // - `slf.cast::<u8>()` is valid for reads for `len * size_of::<u8>()`
        //   many bytes because...
        //   - `slf` is the same pointer as `self`, and `self` is a reference
        //     which points to an object whose size is `len`. Thus...
        //     - The entire region of `len` bytes starting at `slf` is contained
        //       within a single allocation.
        //     - `slf` is non-null.
        //   - `slf` is trivially aligned to `align_of::<u8>() == 1`.
        // - `Self: IntoBytes` ensures that all of the bytes of `slf` are
        //   initialized.
        // - Since `slf` is derived from `self`, and `self` is an immutable
        //   reference, the only other references to this memory region that
        //   could exist are other immutable references, which by `Self:
        //   Immutable` don't permit mutation.
        // - The total size of the resulting slice is no larger than
        //   `isize::MAX` because no allocation produced by safe code can be
        //   larger than `isize::MAX`.
        //
        // FIXME(#429): Add references to docs and quotes.
        unsafe { slice::from_raw_parts(slf.cast::<u8>(), len) }
    }
5268
    /// Gets the bytes of this value mutably.
    ///
    /// # Examples
    ///
    /// ```
    /// use zerocopy::IntoBytes;
    /// # use zerocopy_derive::*;
    ///
    /// # #[derive(Eq, PartialEq, Debug)]
    /// #[derive(FromBytes, IntoBytes, Immutable)]
    /// #[repr(C)]
    /// struct PacketHeader {
    ///     src_port: [u8; 2],
    ///     dst_port: [u8; 2],
    ///     length: [u8; 2],
    ///     checksum: [u8; 2],
    /// }
    ///
    /// let mut header = PacketHeader {
    ///     src_port: [0, 1],
    ///     dst_port: [2, 3],
    ///     length: [4, 5],
    ///     checksum: [6, 7],
    /// };
    ///
    /// let bytes = header.as_mut_bytes();
    ///
    /// assert_eq!(bytes, [0, 1, 2, 3, 4, 5, 6, 7]);
    ///
    /// bytes.reverse();
    ///
    /// assert_eq!(header, PacketHeader {
    ///     src_port: [7, 6],
    ///     dst_port: [5, 4],
    ///     length: [3, 2],
    ///     checksum: [1, 0],
    /// });
    /// ```
    #[must_use = "has no side effects"]
    #[inline(always)]
    fn as_mut_bytes(&mut self) -> &mut [u8]
    where
        Self: FromBytes,
    {
        // Note that this method does not have a `Self: Sized` bound;
        // `size_of_val` works for unsized values too.
        let len = mem::size_of_val(self);
        // Coercion (not a cast) from `&mut Self` to a raw pointer; for
        // unsized `Self` this preserves the pointer's metadata.
        let slf: *mut Self = self;

        // SAFETY:
        // - `slf.cast::<u8>()` is valid for reads and writes for `len *
        //   size_of::<u8>()` many bytes because...
        //   - `slf` is the same pointer as `self`, and `self` is a reference
        //     which points to an object whose size is `len`. Thus...
        //     - The entire region of `len` bytes starting at `slf` is contained
        //       within a single allocation.
        //     - `slf` is non-null.
        //   - `slf` is trivially aligned to `align_of::<u8>() == 1`.
        // - `Self: IntoBytes` ensures that all of the bytes of `slf` are
        //   initialized.
        // - `Self: FromBytes` ensures that no write to this memory region
        //   could result in it containing an invalid `Self`.
        // - Since `slf` is derived from `self`, and `self` is a mutable
        //   reference, no other references to this memory region can exist.
        // - The total size of the resulting slice is no larger than
        //   `isize::MAX` because no allocation produced by safe code can be
        //   larger than `isize::MAX`.
        //
        // FIXME(#429): Add references to docs and quotes.
        unsafe { slice::from_raw_parts_mut(slf.cast::<u8>(), len) }
    }
5340
5341    /// Writes a copy of `self` to `dst`.
5342    ///
5343    /// If `dst.len() != size_of_val(self)`, `write_to` returns `Err`.
5344    ///
5345    /// # Examples
5346    ///
5347    /// ```
5348    /// use zerocopy::IntoBytes;
5349    /// # use zerocopy_derive::*;
5350    ///
5351    /// #[derive(IntoBytes, Immutable)]
5352    /// #[repr(C)]
5353    /// struct PacketHeader {
5354    ///     src_port: [u8; 2],
5355    ///     dst_port: [u8; 2],
5356    ///     length: [u8; 2],
5357    ///     checksum: [u8; 2],
5358    /// }
5359    ///
5360    /// let header = PacketHeader {
5361    ///     src_port: [0, 1],
5362    ///     dst_port: [2, 3],
5363    ///     length: [4, 5],
5364    ///     checksum: [6, 7],
5365    /// };
5366    ///
5367    /// let mut bytes = [0, 0, 0, 0, 0, 0, 0, 0];
5368    ///
5369    /// header.write_to(&mut bytes[..]);
5370    ///
5371    /// assert_eq!(bytes, [0, 1, 2, 3, 4, 5, 6, 7]);
5372    /// ```
5373    ///
5374    /// If too many or too few target bytes are provided, `write_to` returns
5375    /// `Err` and leaves the target bytes unmodified:
5376    ///
5377    /// ```
5378    /// # use zerocopy::IntoBytes;
5379    /// # let header = u128::MAX;
5380    /// let mut excessive_bytes = &mut [0u8; 128][..];
5381    ///
5382    /// let write_result = header.write_to(excessive_bytes);
5383    ///
5384    /// assert!(write_result.is_err());
5385    /// assert_eq!(excessive_bytes, [0u8; 128]);
5386    /// ```
5387    #[must_use = "callers should check the return value to see if the operation succeeded"]
5388    #[inline]
5389    #[allow(clippy::mut_from_ref)] // False positive: `&self -> &mut [u8]`
5390    fn write_to(&self, dst: &mut [u8]) -> Result<(), SizeError<&Self, &mut [u8]>>
5391    where
5392        Self: Immutable,
5393    {
5394        let src = self.as_bytes();
5395        if dst.len() == src.len() {
5396            // SAFETY: Within this branch of the conditional, we have ensured
5397            // that `dst.len()` is equal to `src.len()`. Neither the size of the
5398            // source nor the size of the destination change between the above
5399            // size check and the invocation of `copy_unchecked`.
5400            unsafe { util::copy_unchecked(src, dst) }
5401            Ok(())
5402        } else {
5403            Err(SizeError::new(self))
5404        }
5405    }
5406
5407    /// Writes a copy of `self` to the prefix of `dst`.
5408    ///
5409    /// `write_to_prefix` writes `self` to the first `size_of_val(self)` bytes
5410    /// of `dst`. If `dst.len() < size_of_val(self)`, it returns `Err`.
5411    ///
5412    /// # Examples
5413    ///
5414    /// ```
5415    /// use zerocopy::IntoBytes;
5416    /// # use zerocopy_derive::*;
5417    ///
5418    /// #[derive(IntoBytes, Immutable)]
5419    /// #[repr(C)]
5420    /// struct PacketHeader {
5421    ///     src_port: [u8; 2],
5422    ///     dst_port: [u8; 2],
5423    ///     length: [u8; 2],
5424    ///     checksum: [u8; 2],
5425    /// }
5426    ///
5427    /// let header = PacketHeader {
5428    ///     src_port: [0, 1],
5429    ///     dst_port: [2, 3],
5430    ///     length: [4, 5],
5431    ///     checksum: [6, 7],
5432    /// };
5433    ///
5434    /// let mut bytes = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0];
5435    ///
5436    /// header.write_to_prefix(&mut bytes[..]);
5437    ///
5438    /// assert_eq!(bytes, [0, 1, 2, 3, 4, 5, 6, 7, 0, 0]);
5439    /// ```
5440    ///
5441    /// If insufficient target bytes are provided, `write_to_prefix` returns
5442    /// `Err` and leaves the target bytes unmodified:
5443    ///
5444    /// ```
5445    /// # use zerocopy::IntoBytes;
5446    /// # let header = u128::MAX;
5447    /// let mut insufficient_bytes = &mut [0, 0][..];
5448    ///
5449    /// let write_result = header.write_to_suffix(insufficient_bytes);
5450    ///
5451    /// assert!(write_result.is_err());
5452    /// assert_eq!(insufficient_bytes, [0, 0]);
5453    /// ```
5454    #[must_use = "callers should check the return value to see if the operation succeeded"]
5455    #[inline]
5456    #[allow(clippy::mut_from_ref)] // False positive: `&self -> &mut [u8]`
5457    fn write_to_prefix(&self, dst: &mut [u8]) -> Result<(), SizeError<&Self, &mut [u8]>>
5458    where
5459        Self: Immutable,
5460    {
5461        let src = self.as_bytes();
5462        match dst.get_mut(..src.len()) {
5463            Some(dst) => {
5464                // SAFETY: Within this branch of the `match`, we have ensured
5465                // through fallible subslicing that `dst.len()` is equal to
5466                // `src.len()`. Neither the size of the source nor the size of
5467                // the destination change between the above subslicing operation
5468                // and the invocation of `copy_unchecked`.
5469                unsafe { util::copy_unchecked(src, dst) }
5470                Ok(())
5471            }
5472            None => Err(SizeError::new(self)),
5473        }
5474    }
5475
5476    /// Writes a copy of `self` to the suffix of `dst`.
5477    ///
5478    /// `write_to_suffix` writes `self` to the last `size_of_val(self)` bytes of
5479    /// `dst`. If `dst.len() < size_of_val(self)`, it returns `Err`.
5480    ///
5481    /// # Examples
5482    ///
5483    /// ```
5484    /// use zerocopy::IntoBytes;
5485    /// # use zerocopy_derive::*;
5486    ///
5487    /// #[derive(IntoBytes, Immutable)]
5488    /// #[repr(C)]
5489    /// struct PacketHeader {
5490    ///     src_port: [u8; 2],
5491    ///     dst_port: [u8; 2],
5492    ///     length: [u8; 2],
5493    ///     checksum: [u8; 2],
5494    /// }
5495    ///
5496    /// let header = PacketHeader {
5497    ///     src_port: [0, 1],
5498    ///     dst_port: [2, 3],
5499    ///     length: [4, 5],
5500    ///     checksum: [6, 7],
5501    /// };
5502    ///
5503    /// let mut bytes = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0];
5504    ///
5505    /// header.write_to_suffix(&mut bytes[..]);
5506    ///
5507    /// assert_eq!(bytes, [0, 0, 0, 1, 2, 3, 4, 5, 6, 7]);
5508    ///
5509    /// let mut insufficient_bytes = &mut [0, 0][..];
5510    ///
5511    /// let write_result = header.write_to_suffix(insufficient_bytes);
5512    ///
5513    /// assert!(write_result.is_err());
5514    /// assert_eq!(insufficient_bytes, [0, 0]);
5515    /// ```
5516    ///
5517    /// If insufficient target bytes are provided, `write_to_suffix` returns
5518    /// `Err` and leaves the target bytes unmodified:
5519    ///
5520    /// ```
5521    /// # use zerocopy::IntoBytes;
5522    /// # let header = u128::MAX;
5523    /// let mut insufficient_bytes = &mut [0, 0][..];
5524    ///
5525    /// let write_result = header.write_to_suffix(insufficient_bytes);
5526    ///
5527    /// assert!(write_result.is_err());
5528    /// assert_eq!(insufficient_bytes, [0, 0]);
5529    /// ```
5530    #[must_use = "callers should check the return value to see if the operation succeeded"]
5531    #[inline]
5532    #[allow(clippy::mut_from_ref)] // False positive: `&self -> &mut [u8]`
5533    fn write_to_suffix(&self, dst: &mut [u8]) -> Result<(), SizeError<&Self, &mut [u8]>>
5534    where
5535        Self: Immutable,
5536    {
5537        let src = self.as_bytes();
5538        let start = if let Some(start) = dst.len().checked_sub(src.len()) {
5539            start
5540        } else {
5541            return Err(SizeError::new(self));
5542        };
5543        let dst = if let Some(dst) = dst.get_mut(start..) {
5544            dst
5545        } else {
5546            // get_mut() should never return None here. We return a `SizeError`
5547            // rather than .unwrap() because in the event the branch is not
5548            // optimized away, returning a value is generally lighter-weight
5549            // than panicking.
5550            return Err(SizeError::new(self));
5551        };
5552        // SAFETY: Through fallible subslicing of `dst`, we have ensured that
5553        // `dst.len()` is equal to `src.len()`. Neither the size of the source
5554        // nor the size of the destination change between the above subslicing
5555        // operation and the invocation of `copy_unchecked`.
5556        unsafe {
5557            util::copy_unchecked(src, dst);
5558        }
5559        Ok(())
5560    }
5561
5562    /// Writes a copy of `self` to an `io::Write`.
5563    ///
5564    /// This is a shorthand for `dst.write_all(self.as_bytes())`, and is useful
5565    /// for interfacing with operating system byte sinks (files, sockets, etc.).
5566    ///
5567    /// # Examples
5568    ///
5569    /// ```no_run
5570    /// use zerocopy::{byteorder::big_endian::U16, FromBytes, IntoBytes};
5571    /// use std::fs::File;
5572    /// # use zerocopy_derive::*;
5573    ///
5574    /// #[derive(FromBytes, IntoBytes, Immutable, KnownLayout)]
5575    /// #[repr(C, packed)]
5576    /// struct GrayscaleImage {
5577    ///     height: U16,
5578    ///     width: U16,
5579    ///     pixels: [U16],
5580    /// }
5581    ///
5582    /// let image = GrayscaleImage::ref_from_bytes(&[0, 0, 0, 0][..]).unwrap();
5583    /// let mut file = File::create("image.bin").unwrap();
5584    /// image.write_to_io(&mut file).unwrap();
5585    /// ```
5586    ///
5587    /// If the write fails, `write_to_io` returns `Err` and a partial write may
5588    /// have occurred; e.g.:
5589    ///
5590    /// ```
5591    /// # use zerocopy::IntoBytes;
5592    ///
5593    /// let src = u128::MAX;
5594    /// let mut dst = [0u8; 2];
5595    ///
5596    /// let write_result = src.write_to_io(&mut dst[..]);
5597    ///
5598    /// assert!(write_result.is_err());
5599    /// assert_eq!(dst, [255, 255]);
5600    /// ```
5601    #[cfg(feature = "std")]
5602    #[cfg_attr(doc_cfg, doc(cfg(feature = "std")))]
5603    #[inline(always)]
5604    fn write_to_io<W>(&self, mut dst: W) -> io::Result<()>
5605    where
5606        Self: Immutable,
5607        W: io::Write,
5608    {
5609        dst.write_all(self.as_bytes())
5610    }
5611
5612    #[deprecated(since = "0.8.0", note = "`IntoBytes::as_bytes_mut` was renamed to `as_mut_bytes`")]
5613    #[doc(hidden)]
5614    #[inline]
5615    fn as_bytes_mut(&mut self) -> &mut [u8]
5616    where
5617        Self: FromBytes,
5618    {
5619        self.as_mut_bytes()
5620    }
5621}
5622
5623/// Analyzes whether a type is [`Unaligned`].
5624///
5625/// This derive analyzes, at compile time, whether the annotated type satisfies
5626/// the [safety conditions] of `Unaligned` and implements `Unaligned` if it is
5627/// sound to do so. This derive can be applied to structs, enums, and unions;
5628/// e.g.:
5629///
5630/// ```
5631/// # use zerocopy_derive::Unaligned;
5632/// #[derive(Unaligned)]
5633/// #[repr(C)]
5634/// struct MyStruct {
5635/// # /*
5636///     ...
5637/// # */
5638/// }
5639///
5640/// #[derive(Unaligned)]
5641/// #[repr(u8)]
5642/// enum MyEnum {
5643/// #   Variant0,
5644/// # /*
5645///     ...
5646/// # */
5647/// }
5648///
5649/// #[derive(Unaligned)]
5650/// #[repr(packed)]
5651/// union MyUnion {
5652/// #   variant: u8,
5653/// # /*
5654///     ...
5655/// # */
5656/// }
5657/// ```
5658///
5659/// # Analysis
5660///
5661/// *This section describes, roughly, the analysis performed by this derive to
5662/// determine whether it is sound to implement `Unaligned` for a given type.
5663/// Unless you are modifying the implementation of this derive, or attempting to
5664/// manually implement `Unaligned` for a type yourself, you don't need to read
5665/// this section.*
5666///
5667/// If a type has the following properties, then this derive can implement
5668/// `Unaligned` for that type:
5669///
5670/// - If the type is a struct or union:
5671///   - If `repr(align(N))` is provided, `N` must equal 1.
5672///   - If the type is `repr(C)` or `repr(transparent)`, all fields must be
5673///     [`Unaligned`].
5674///   - If the type is not `repr(C)` or `repr(transparent)`, it must be
5675///     `repr(packed)` or `repr(packed(1))`.
5676/// - If the type is an enum:
5677///   - If `repr(align(N))` is provided, `N` must equal 1.
5678///   - It must be a field-less enum (meaning that all variants have no fields).
5679///   - It must be `repr(i8)` or `repr(u8)`.
5680///
5681/// [safety conditions]: trait@Unaligned#safety
5682#[cfg(any(feature = "derive", test))]
5683#[cfg_attr(doc_cfg, doc(cfg(feature = "derive")))]
5684pub use zerocopy_derive::Unaligned;
5685
5686/// Types with no alignment requirement.
5687///
5688/// If `T: Unaligned`, then `align_of::<T>() == 1`.
5689///
5690/// # Implementation
5691///
5692/// **Do not implement this trait yourself!** Instead, use
5693/// [`#[derive(Unaligned)]`][derive]; e.g.:
5694///
5695/// ```
5696/// # use zerocopy_derive::Unaligned;
5697/// #[derive(Unaligned)]
5698/// #[repr(C)]
5699/// struct MyStruct {
5700/// # /*
5701///     ...
5702/// # */
5703/// }
5704///
5705/// #[derive(Unaligned)]
5706/// #[repr(u8)]
5707/// enum MyEnum {
5708/// #   Variant0,
5709/// # /*
5710///     ...
5711/// # */
5712/// }
5713///
5714/// #[derive(Unaligned)]
5715/// #[repr(packed)]
5716/// union MyUnion {
5717/// #   variant: u8,
5718/// # /*
5719///     ...
5720/// # */
5721/// }
5722/// ```
5723///
5724/// This derive performs a sophisticated, compile-time safety analysis to
5725/// determine whether a type is `Unaligned`.
5726///
5727/// # Safety
5728///
5729/// *This section describes what is required in order for `T: Unaligned`, and
5730/// what unsafe code may assume of such types. If you don't plan on implementing
5731/// `Unaligned` manually, and you don't plan on writing unsafe code that
5732/// operates on `Unaligned` types, then you don't need to read this section.*
5733///
5734/// If `T: Unaligned`, then unsafe code may assume that it is sound to produce a
5735/// reference to `T` at any memory location regardless of alignment. If a type
5736/// is marked as `Unaligned` which violates this contract, it may cause
5737/// undefined behavior.
5738///
5739/// `#[derive(Unaligned)]` only permits [types which satisfy these
5740/// requirements][derive-analysis].
5741///
5742#[cfg_attr(
5743    feature = "derive",
5744    doc = "[derive]: zerocopy_derive::Unaligned",
5745    doc = "[derive-analysis]: zerocopy_derive::Unaligned#analysis"
5746)]
5747#[cfg_attr(
5748    not(feature = "derive"),
5749    doc = concat!("[derive]: https://docs.rs/zerocopy/", env!("CARGO_PKG_VERSION"), "/zerocopy/derive.Unaligned.html"),
5750    doc = concat!("[derive-analysis]: https://docs.rs/zerocopy/", env!("CARGO_PKG_VERSION"), "/zerocopy/derive.Unaligned.html#analysis"),
5751)]
5752#[cfg_attr(
5753    not(no_zerocopy_diagnostic_on_unimplemented_1_78_0),
5754    diagnostic::on_unimplemented(note = "Consider adding `#[derive(Unaligned)]` to `{Self}`")
5755)]
pub unsafe trait Unaligned {
    // This hidden, never-called marker method exists so that — as its name
    // says — only `#[derive(Unaligned)]`, which performs the required safety
    // analysis, can provide an implementation of this trait.
    //
    // The `Self: Sized` bound makes it so that `Unaligned` is still object
    // safe.
    #[doc(hidden)]
    fn only_derive_is_allowed_to_implement_this_trait()
    where
        Self: Sized;
}
5764
5765/// Derives optimized [`PartialEq`] and [`Eq`] implementations.
5766///
5767/// This derive can be applied to structs and enums implementing both
5768/// [`Immutable`] and [`IntoBytes`]; e.g.:
5769///
5770/// ```
5771/// # use zerocopy_derive::{ByteEq, Immutable, IntoBytes};
5772/// #[derive(ByteEq, Immutable, IntoBytes)]
5773/// #[repr(C)]
5774/// struct MyStruct {
5775/// # /*
5776///     ...
5777/// # */
5778/// }
5779///
5780/// #[derive(ByteEq, Immutable, IntoBytes)]
5781/// #[repr(u8)]
5782/// enum MyEnum {
5783/// #   Variant,
5784/// # /*
5785///     ...
5786/// # */
5787/// }
5788/// ```
5789///
5790/// The standard library's [`derive(Eq, PartialEq)`][derive@PartialEq] computes
5791/// equality by individually comparing each field. Instead, the implementation
/// of [`PartialEq::eq`] emitted by `derive(ByteEq)` converts the entirety of
5793/// `self` and `other` to byte slices and compares those slices for equality.
5794/// This may have performance advantages.
5795#[cfg(any(feature = "derive", test))]
5796#[cfg_attr(doc_cfg, doc(cfg(feature = "derive")))]
5797pub use zerocopy_derive::ByteEq;
5798/// Derives an optimized [`Hash`] implementation.
5799///
5800/// This derive can be applied to structs and enums implementing both
5801/// [`Immutable`] and [`IntoBytes`]; e.g.:
5802///
5803/// ```
5804/// # use zerocopy_derive::{ByteHash, Immutable, IntoBytes};
5805/// #[derive(ByteHash, Immutable, IntoBytes)]
5806/// #[repr(C)]
5807/// struct MyStruct {
5808/// # /*
5809///     ...
5810/// # */
5811/// }
5812///
5813/// #[derive(ByteHash, Immutable, IntoBytes)]
5814/// #[repr(u8)]
5815/// enum MyEnum {
5816/// #   Variant,
5817/// # /*
5818///     ...
5819/// # */
5820/// }
5821/// ```
5822///
5823/// The standard library's [`derive(Hash)`][derive@Hash] produces hashes by
5824/// individually hashing each field and combining the results. Instead, the
5825/// implementations of [`Hash::hash()`] and [`Hash::hash_slice()`] generated by
/// `derive(ByteHash)` convert the entirety of `self` to a byte slice and hash
5827/// it in a single call to [`Hasher::write()`]. This may have performance
5828/// advantages.
5829///
5830/// [`Hash`]: core::hash::Hash
5831/// [`Hash::hash()`]: core::hash::Hash::hash()
5832/// [`Hash::hash_slice()`]: core::hash::Hash::hash_slice()
5833#[cfg(any(feature = "derive", test))]
5834#[cfg_attr(doc_cfg, doc(cfg(feature = "derive")))]
5835pub use zerocopy_derive::ByteHash;
5836/// Implements [`SplitAt`].
5837///
5838/// This derive can be applied to structs; e.g.:
5839///
5840/// ```
/// # use zerocopy_derive::{KnownLayout, SplitAt};
/// #[derive(SplitAt, KnownLayout)]
/// #[repr(C)]
/// struct MyStruct {
/// # /*
///     ...
/// # */
/// #   trailing: [u8],
/// }
5849/// ```
5850#[cfg(any(feature = "derive", test))]
5851#[cfg_attr(doc_cfg, doc(cfg(feature = "derive")))]
5852pub use zerocopy_derive::SplitAt;
5853
#[cfg(feature = "alloc")]
#[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))]
#[cfg(not(no_zerocopy_panic_in_const_and_vec_try_reserve_1_57_0))]
mod alloc_support {
    use super::*;

    /// Extends a `Vec<T>` by pushing `additional` new items onto the end of the
    /// vector. The new items are initialized with zeros.
    ///
    /// # Errors
    ///
    /// Returns `Err(AllocError)` if the vector cannot be grown by `additional`
    /// elements.
    #[cfg(not(no_zerocopy_panic_in_const_and_vec_try_reserve_1_57_0))]
    #[doc(hidden)]
    #[deprecated(since = "0.8.0", note = "moved to `FromZeros`")]
    #[inline(always)]
    pub fn extend_vec_zeroed<T: FromZeros>(
        v: &mut Vec<T>,
        additional: usize,
    ) -> Result<(), AllocError> {
        // Deprecated free-function shim; forwards to the trait method.
        <T as FromZeros>::extend_vec_zeroed(v, additional)
    }

    /// Inserts `additional` new items into `Vec<T>` at `position`. The new
    /// items are initialized with zeros.
    ///
    /// # Errors
    ///
    /// Returns `Err(AllocError)` if the vector cannot be grown by `additional`
    /// elements.
    ///
    /// # Panics
    ///
    /// Panics if `position > v.len()`.
    #[cfg(not(no_zerocopy_panic_in_const_and_vec_try_reserve_1_57_0))]
    #[doc(hidden)]
    #[deprecated(since = "0.8.0", note = "moved to `FromZeros`")]
    #[inline(always)]
    pub fn insert_vec_zeroed<T: FromZeros>(
        v: &mut Vec<T>,
        position: usize,
        additional: usize,
    ) -> Result<(), AllocError> {
        // Deprecated free-function shim; forwards to the trait method.
        <T as FromZeros>::insert_vec_zeroed(v, position, additional)
    }
}
5891
5892#[cfg(feature = "alloc")]
5893#[cfg(not(no_zerocopy_panic_in_const_and_vec_try_reserve_1_57_0))]
5894#[doc(hidden)]
5895pub use alloc_support::*;
5896
5897#[cfg(test)]
5898#[allow(clippy::assertions_on_result_states, clippy::unreadable_literal)]
5899mod tests {
5900    use static_assertions::assert_impl_all;
5901
5902    use super::*;
5903    use crate::util::testutil::*;
5904
    // An unsized type.
    //
    // This is used to test the custom derives of our traits. The `[u8]` type
    // gets a hand-rolled impl, so it doesn't exercise our custom derives.
    //
    // `repr(transparent)` ensures `Unsized` has the same layout as its sole
    // field, `[u8]`.
    #[derive(Debug, Eq, PartialEq, FromBytes, IntoBytes, Unaligned, Immutable)]
    #[repr(transparent)]
    struct Unsized([u8]);
5912
    impl Unsized {
        /// Reinterprets a mutable byte slice as a mutable `Unsized` reference.
        fn from_mut_slice(slc: &mut [u8]) -> &mut Unsized {
            // SAFETY: This is *probably* sound - since the layouts of `[u8]`
            // and `Unsized` are the same, so are the layouts of `&mut [u8]`
            // and `&mut Unsized`. [1] Even if it turns out that this isn't
            // actually guaranteed by the language spec, we can just change
            // this since it's in test code.
            //
            // [1] https://github.com/rust-lang/unsafe-code-guidelines/issues/375
            unsafe { mem::transmute(slc) }
        }
    }
5925
    #[test]
    fn test_known_layout() {
        // Test that `$ty` and `ManuallyDrop<$ty>` have the expected layout.
        // Test that `PhantomData<$ty>` has the same layout as `()` regardless
        // of `$ty`.
        macro_rules! test {
            ($ty:ty, $expect:expr) => {
                let expect = $expect;
                assert_eq!(<$ty as KnownLayout>::LAYOUT, expect);
                assert_eq!(<ManuallyDrop<$ty> as KnownLayout>::LAYOUT, expect);
                assert_eq!(<PhantomData<$ty> as KnownLayout>::LAYOUT, <() as KnownLayout>::LAYOUT);
            };
        }

        // Helper that constructs the expected `DstLayout`:
        // `trailing_slice_elem_size` of `None` means a sized type whose size
        // is `offset`; `Some(elem_size)` means a slice DST whose trailing
        // slice starts at `offset`.
        let layout =
            |offset, align, trailing_slice_elem_size, statically_shallow_unpadded| DstLayout {
                align: NonZeroUsize::new(align).unwrap(),
                size_info: match trailing_slice_elem_size {
                    None => SizeInfo::Sized { size: offset },
                    Some(elem_size) => {
                        SizeInfo::SliceDst(TrailingSliceLayout { offset, elem_size })
                    }
                },
                statically_shallow_unpadded,
            };

        test!((), layout(0, 1, None, false));
        test!(u8, layout(1, 1, None, false));
        // Use `align_of` because `u64` alignment may be smaller than 8 on some
        // platforms.
        test!(u64, layout(8, mem::align_of::<u64>(), None, false));
        test!(AU64, layout(8, 8, None, false));

        // `Option<&T>` is niche-optimized to be pointer-sized, so its layout
        // matches `usize`'s.
        test!(Option<&'static ()>, usize::LAYOUT);

        // Slice DSTs: zero offset, trailing element sizes 0 and 1.
        test!([()], layout(0, 1, Some(0), true));
        test!([u8], layout(0, 1, Some(1), true));
        test!(str, layout(0, 1, Some(1), true));
    }
5965
5966    #[cfg(feature = "derive")]
5967    #[test]
5968    fn test_known_layout_derive() {
5969        // In this and other files (`late_compile_pass.rs`,
5970        // `mid_compile_pass.rs`, and `struct.rs`), we test success and failure
5971        // modes of `derive(KnownLayout)` for the following combination of
5972        // properties:
5973        //
5974        // +------------+--------------------------------------+-----------+
5975        // |            |      trailing field properties       |           |
5976        // | `repr(C)`? | generic? | `KnownLayout`? | `Sized`? | Type Name |
5977        // |------------+----------+----------------+----------+-----------|
5978        // |          N |        N |              N |        N |      KL00 |
5979        // |          N |        N |              N |        Y |      KL01 |
5980        // |          N |        N |              Y |        N |      KL02 |
5981        // |          N |        N |              Y |        Y |      KL03 |
5982        // |          N |        Y |              N |        N |      KL04 |
5983        // |          N |        Y |              N |        Y |      KL05 |
5984        // |          N |        Y |              Y |        N |      KL06 |
5985        // |          N |        Y |              Y |        Y |      KL07 |
5986        // |          Y |        N |              N |        N |      KL08 |
5987        // |          Y |        N |              N |        Y |      KL09 |
5988        // |          Y |        N |              Y |        N |      KL10 |
5989        // |          Y |        N |              Y |        Y |      KL11 |
5990        // |          Y |        Y |              N |        N |      KL12 |
5991        // |          Y |        Y |              N |        Y |      KL13 |
5992        // |          Y |        Y |              Y |        N |      KL14 |
5993        // |          Y |        Y |              Y |        Y |      KL15 |
5994        // +------------+----------+----------------+----------+-----------+
5995
5996        struct NotKnownLayout<T = ()> {
5997            _t: T,
5998        }
5999
6000        #[derive(KnownLayout)]
6001        #[repr(C)]
6002        struct AlignSize<const ALIGN: usize, const SIZE: usize>
6003        where
6004            elain::Align<ALIGN>: elain::Alignment,
6005        {
6006            _align: elain::Align<ALIGN>,
6007            size: [u8; SIZE],
6008        }
6009
6010        type AU16 = AlignSize<2, 2>;
6011        type AU32 = AlignSize<4, 4>;
6012
6013        fn _assert_kl<T: ?Sized + KnownLayout>(_: &T) {}
6014
6015        let sized_layout = |align, size| DstLayout {
6016            align: NonZeroUsize::new(align).unwrap(),
6017            size_info: SizeInfo::Sized { size },
6018            statically_shallow_unpadded: false,
6019        };
6020
6021        let unsized_layout = |align, elem_size, offset, statically_shallow_unpadded| DstLayout {
6022            align: NonZeroUsize::new(align).unwrap(),
6023            size_info: SizeInfo::SliceDst(TrailingSliceLayout { offset, elem_size }),
6024            statically_shallow_unpadded,
6025        };
6026
6027        // | `repr(C)`? | generic? | `KnownLayout`? | `Sized`? | Type Name |
6028        // |          N |        N |              N |        Y |      KL01 |
6029        #[allow(dead_code)]
6030        #[derive(KnownLayout)]
6031        struct KL01(NotKnownLayout<AU32>, NotKnownLayout<AU16>);
6032
6033        let expected = DstLayout::for_type::<KL01>();
6034
6035        assert_eq!(<KL01 as KnownLayout>::LAYOUT, expected);
6036        assert_eq!(<KL01 as KnownLayout>::LAYOUT, sized_layout(4, 8));
6037
6038        // ...with `align(N)`:
6039        #[allow(dead_code)]
6040        #[derive(KnownLayout)]
6041        #[repr(align(64))]
6042        struct KL01Align(NotKnownLayout<AU32>, NotKnownLayout<AU16>);
6043
6044        let expected = DstLayout::for_type::<KL01Align>();
6045
6046        assert_eq!(<KL01Align as KnownLayout>::LAYOUT, expected);
6047        assert_eq!(<KL01Align as KnownLayout>::LAYOUT, sized_layout(64, 64));
6048
6049        // ...with `packed`:
6050        #[allow(dead_code)]
6051        #[derive(KnownLayout)]
6052        #[repr(packed)]
6053        struct KL01Packed(NotKnownLayout<AU32>, NotKnownLayout<AU16>);
6054
6055        let expected = DstLayout::for_type::<KL01Packed>();
6056
6057        assert_eq!(<KL01Packed as KnownLayout>::LAYOUT, expected);
6058        assert_eq!(<KL01Packed as KnownLayout>::LAYOUT, sized_layout(1, 6));
6059
6060        // ...with `packed(N)`:
6061        #[allow(dead_code)]
6062        #[derive(KnownLayout)]
6063        #[repr(packed(2))]
6064        struct KL01PackedN(NotKnownLayout<AU32>, NotKnownLayout<AU16>);
6065
6066        assert_impl_all!(KL01PackedN: KnownLayout);
6067
6068        let expected = DstLayout::for_type::<KL01PackedN>();
6069
6070        assert_eq!(<KL01PackedN as KnownLayout>::LAYOUT, expected);
6071        assert_eq!(<KL01PackedN as KnownLayout>::LAYOUT, sized_layout(2, 6));
6072
6073        // | `repr(C)`? | generic? | `KnownLayout`? | `Sized`? | Type Name |
6074        // |          N |        N |              Y |        Y |      KL03 |
6075        #[allow(dead_code)]
6076        #[derive(KnownLayout)]
6077        struct KL03(NotKnownLayout, u8);
6078
6079        let expected = DstLayout::for_type::<KL03>();
6080
6081        assert_eq!(<KL03 as KnownLayout>::LAYOUT, expected);
6082        assert_eq!(<KL03 as KnownLayout>::LAYOUT, sized_layout(1, 1));
6083
6084        // ... with `align(N)`
6085        #[allow(dead_code)]
6086        #[derive(KnownLayout)]
6087        #[repr(align(64))]
6088        struct KL03Align(NotKnownLayout<AU32>, u8);
6089
6090        let expected = DstLayout::for_type::<KL03Align>();
6091
6092        assert_eq!(<KL03Align as KnownLayout>::LAYOUT, expected);
6093        assert_eq!(<KL03Align as KnownLayout>::LAYOUT, sized_layout(64, 64));
6094
6095        // ... with `packed`:
6096        #[allow(dead_code)]
6097        #[derive(KnownLayout)]
6098        #[repr(packed)]
6099        struct KL03Packed(NotKnownLayout<AU32>, u8);
6100
6101        let expected = DstLayout::for_type::<KL03Packed>();
6102
6103        assert_eq!(<KL03Packed as KnownLayout>::LAYOUT, expected);
6104        assert_eq!(<KL03Packed as KnownLayout>::LAYOUT, sized_layout(1, 5));
6105
6106        // ... with `packed(N)`
6107        #[allow(dead_code)]
6108        #[derive(KnownLayout)]
6109        #[repr(packed(2))]
6110        struct KL03PackedN(NotKnownLayout<AU32>, u8);
6111
6112        assert_impl_all!(KL03PackedN: KnownLayout);
6113
6114        let expected = DstLayout::for_type::<KL03PackedN>();
6115
6116        assert_eq!(<KL03PackedN as KnownLayout>::LAYOUT, expected);
6117        assert_eq!(<KL03PackedN as KnownLayout>::LAYOUT, sized_layout(2, 6));
6118
6119        // | `repr(C)`? | generic? | `KnownLayout`? | `Sized`? | Type Name |
6120        // |          N |        Y |              N |        Y |      KL05 |
6121        #[allow(dead_code)]
6122        #[derive(KnownLayout)]
6123        struct KL05<T>(u8, T);
6124
6125        fn _test_kl05<T>(t: T) -> impl KnownLayout {
6126            KL05(0u8, t)
6127        }
6128
6129        // | `repr(C)`? | generic? | `KnownLayout`? | `Sized`? | Type Name |
6130        // |          N |        Y |              Y |        Y |      KL07 |
6131        #[allow(dead_code)]
6132        #[derive(KnownLayout)]
6133        struct KL07<T: KnownLayout>(u8, T);
6134
6135        fn _test_kl07<T: KnownLayout>(t: T) -> impl KnownLayout {
6136            let _ = KL07(0u8, t);
6137        }
6138
6139        // | `repr(C)`? | generic? | `KnownLayout`? | `Sized`? | Type Name |
6140        // |          Y |        N |              Y |        N |      KL10 |
6141        #[allow(dead_code)]
6142        #[derive(KnownLayout)]
6143        #[repr(C)]
6144        struct KL10(NotKnownLayout<AU32>, [u8]);
6145
6146        let expected = DstLayout::new_zst(None)
6147            .extend(DstLayout::for_type::<NotKnownLayout<AU32>>(), None)
6148            .extend(<[u8] as KnownLayout>::LAYOUT, None)
6149            .pad_to_align();
6150
6151        assert_eq!(<KL10 as KnownLayout>::LAYOUT, expected);
6152        assert_eq!(<KL10 as KnownLayout>::LAYOUT, unsized_layout(4, 1, 4, false));
6153
6154        // ...with `align(N)`:
6155        #[allow(dead_code)]
6156        #[derive(KnownLayout)]
6157        #[repr(C, align(64))]
6158        struct KL10Align(NotKnownLayout<AU32>, [u8]);
6159
6160        let repr_align = NonZeroUsize::new(64);
6161
6162        let expected = DstLayout::new_zst(repr_align)
6163            .extend(DstLayout::for_type::<NotKnownLayout<AU32>>(), None)
6164            .extend(<[u8] as KnownLayout>::LAYOUT, None)
6165            .pad_to_align();
6166
6167        assert_eq!(<KL10Align as KnownLayout>::LAYOUT, expected);
6168        assert_eq!(<KL10Align as KnownLayout>::LAYOUT, unsized_layout(64, 1, 4, false));
6169
6170        // ...with `packed`:
6171        #[allow(dead_code)]
6172        #[derive(KnownLayout)]
6173        #[repr(C, packed)]
6174        struct KL10Packed(NotKnownLayout<AU32>, [u8]);
6175
6176        let repr_packed = NonZeroUsize::new(1);
6177
6178        let expected = DstLayout::new_zst(None)
6179            .extend(DstLayout::for_type::<NotKnownLayout<AU32>>(), repr_packed)
6180            .extend(<[u8] as KnownLayout>::LAYOUT, repr_packed)
6181            .pad_to_align();
6182
6183        assert_eq!(<KL10Packed as KnownLayout>::LAYOUT, expected);
6184        assert_eq!(<KL10Packed as KnownLayout>::LAYOUT, unsized_layout(1, 1, 4, false));
6185
6186        // ...with `packed(N)`:
6187        #[allow(dead_code)]
6188        #[derive(KnownLayout)]
6189        #[repr(C, packed(2))]
6190        struct KL10PackedN(NotKnownLayout<AU32>, [u8]);
6191
6192        let repr_packed = NonZeroUsize::new(2);
6193
6194        let expected = DstLayout::new_zst(None)
6195            .extend(DstLayout::for_type::<NotKnownLayout<AU32>>(), repr_packed)
6196            .extend(<[u8] as KnownLayout>::LAYOUT, repr_packed)
6197            .pad_to_align();
6198
6199        assert_eq!(<KL10PackedN as KnownLayout>::LAYOUT, expected);
6200        assert_eq!(<KL10PackedN as KnownLayout>::LAYOUT, unsized_layout(2, 1, 4, false));
6201
6202        // | `repr(C)`? | generic? | `KnownLayout`? | `Sized`? | Type Name |
6203        // |          Y |        N |              Y |        Y |      KL11 |
6204        #[allow(dead_code)]
6205        #[derive(KnownLayout)]
6206        #[repr(C)]
6207        struct KL11(NotKnownLayout<AU64>, u8);
6208
6209        let expected = DstLayout::new_zst(None)
6210            .extend(DstLayout::for_type::<NotKnownLayout<AU64>>(), None)
6211            .extend(<u8 as KnownLayout>::LAYOUT, None)
6212            .pad_to_align();
6213
6214        assert_eq!(<KL11 as KnownLayout>::LAYOUT, expected);
6215        assert_eq!(<KL11 as KnownLayout>::LAYOUT, sized_layout(8, 16));
6216
6217        // ...with `align(N)`:
6218        #[allow(dead_code)]
6219        #[derive(KnownLayout)]
6220        #[repr(C, align(64))]
6221        struct KL11Align(NotKnownLayout<AU64>, u8);
6222
6223        let repr_align = NonZeroUsize::new(64);
6224
6225        let expected = DstLayout::new_zst(repr_align)
6226            .extend(DstLayout::for_type::<NotKnownLayout<AU64>>(), None)
6227            .extend(<u8 as KnownLayout>::LAYOUT, None)
6228            .pad_to_align();
6229
6230        assert_eq!(<KL11Align as KnownLayout>::LAYOUT, expected);
6231        assert_eq!(<KL11Align as KnownLayout>::LAYOUT, sized_layout(64, 64));
6232
6233        // ...with `packed`:
6234        #[allow(dead_code)]
6235        #[derive(KnownLayout)]
6236        #[repr(C, packed)]
6237        struct KL11Packed(NotKnownLayout<AU64>, u8);
6238
6239        let repr_packed = NonZeroUsize::new(1);
6240
6241        let expected = DstLayout::new_zst(None)
6242            .extend(DstLayout::for_type::<NotKnownLayout<AU64>>(), repr_packed)
6243            .extend(<u8 as KnownLayout>::LAYOUT, repr_packed)
6244            .pad_to_align();
6245
6246        assert_eq!(<KL11Packed as KnownLayout>::LAYOUT, expected);
6247        assert_eq!(<KL11Packed as KnownLayout>::LAYOUT, sized_layout(1, 9));
6248
6249        // ...with `packed(N)`:
6250        #[allow(dead_code)]
6251        #[derive(KnownLayout)]
6252        #[repr(C, packed(2))]
6253        struct KL11PackedN(NotKnownLayout<AU64>, u8);
6254
6255        let repr_packed = NonZeroUsize::new(2);
6256
6257        let expected = DstLayout::new_zst(None)
6258            .extend(DstLayout::for_type::<NotKnownLayout<AU64>>(), repr_packed)
6259            .extend(<u8 as KnownLayout>::LAYOUT, repr_packed)
6260            .pad_to_align();
6261
6262        assert_eq!(<KL11PackedN as KnownLayout>::LAYOUT, expected);
6263        assert_eq!(<KL11PackedN as KnownLayout>::LAYOUT, sized_layout(2, 10));
6264
6265        // | `repr(C)`? | generic? | `KnownLayout`? | `Sized`? | Type Name |
6266        // |          Y |        Y |              Y |        N |      KL14 |
6267        #[allow(dead_code)]
6268        #[derive(KnownLayout)]
6269        #[repr(C)]
6270        struct KL14<T: ?Sized + KnownLayout>(u8, T);
6271
6272        fn _test_kl14<T: ?Sized + KnownLayout>(kl: &KL14<T>) {
6273            _assert_kl(kl)
6274        }
6275
6276        // | `repr(C)`? | generic? | `KnownLayout`? | `Sized`? | Type Name |
6277        // |          Y |        Y |              Y |        Y |      KL15 |
6278        #[allow(dead_code)]
6279        #[derive(KnownLayout)]
6280        #[repr(C)]
6281        struct KL15<T: KnownLayout>(u8, T);
6282
6283        fn _test_kl15<T: KnownLayout>(t: T) -> impl KnownLayout {
6284            let _ = KL15(0u8, t);
6285        }
6286
6287        // Test a variety of combinations of field types:
6288        //  - ()
6289        //  - u8
6290        //  - AU16
6291        //  - [()]
6292        //  - [u8]
6293        //  - [AU16]
6294
6295        #[allow(clippy::upper_case_acronyms, dead_code)]
6296        #[derive(KnownLayout)]
6297        #[repr(C)]
6298        struct KLTU<T, U: ?Sized>(T, U);
6299
6300        assert_eq!(<KLTU<(), ()> as KnownLayout>::LAYOUT, sized_layout(1, 0));
6301
6302        assert_eq!(<KLTU<(), u8> as KnownLayout>::LAYOUT, sized_layout(1, 1));
6303
6304        assert_eq!(<KLTU<(), AU16> as KnownLayout>::LAYOUT, sized_layout(2, 2));
6305
6306        assert_eq!(<KLTU<(), [()]> as KnownLayout>::LAYOUT, unsized_layout(1, 0, 0, false));
6307
6308        assert_eq!(<KLTU<(), [u8]> as KnownLayout>::LAYOUT, unsized_layout(1, 1, 0, false));
6309
6310        assert_eq!(<KLTU<(), [AU16]> as KnownLayout>::LAYOUT, unsized_layout(2, 2, 0, false));
6311
6312        assert_eq!(<KLTU<u8, ()> as KnownLayout>::LAYOUT, sized_layout(1, 1));
6313
6314        assert_eq!(<KLTU<u8, u8> as KnownLayout>::LAYOUT, sized_layout(1, 2));
6315
6316        assert_eq!(<KLTU<u8, AU16> as KnownLayout>::LAYOUT, sized_layout(2, 4));
6317
6318        assert_eq!(<KLTU<u8, [()]> as KnownLayout>::LAYOUT, unsized_layout(1, 0, 1, false));
6319
6320        assert_eq!(<KLTU<u8, [u8]> as KnownLayout>::LAYOUT, unsized_layout(1, 1, 1, false));
6321
6322        assert_eq!(<KLTU<u8, [AU16]> as KnownLayout>::LAYOUT, unsized_layout(2, 2, 2, false));
6323
6324        assert_eq!(<KLTU<AU16, ()> as KnownLayout>::LAYOUT, sized_layout(2, 2));
6325
6326        assert_eq!(<KLTU<AU16, u8> as KnownLayout>::LAYOUT, sized_layout(2, 4));
6327
6328        assert_eq!(<KLTU<AU16, AU16> as KnownLayout>::LAYOUT, sized_layout(2, 4));
6329
6330        assert_eq!(<KLTU<AU16, [()]> as KnownLayout>::LAYOUT, unsized_layout(2, 0, 2, false));
6331
6332        assert_eq!(<KLTU<AU16, [u8]> as KnownLayout>::LAYOUT, unsized_layout(2, 1, 2, false));
6333
6334        assert_eq!(<KLTU<AU16, [AU16]> as KnownLayout>::LAYOUT, unsized_layout(2, 2, 2, false));
6335
6336        // Test a variety of field counts.
6337
6338        #[derive(KnownLayout)]
6339        #[repr(C)]
6340        struct KLF0;
6341
6342        assert_eq!(<KLF0 as KnownLayout>::LAYOUT, sized_layout(1, 0));
6343
6344        #[derive(KnownLayout)]
6345        #[repr(C)]
6346        struct KLF1([u8]);
6347
6348        assert_eq!(<KLF1 as KnownLayout>::LAYOUT, unsized_layout(1, 1, 0, true));
6349
6350        #[derive(KnownLayout)]
6351        #[repr(C)]
6352        struct KLF2(NotKnownLayout<u8>, [u8]);
6353
6354        assert_eq!(<KLF2 as KnownLayout>::LAYOUT, unsized_layout(1, 1, 1, false));
6355
6356        #[derive(KnownLayout)]
6357        #[repr(C)]
6358        struct KLF3(NotKnownLayout<u8>, NotKnownLayout<AU16>, [u8]);
6359
6360        assert_eq!(<KLF3 as KnownLayout>::LAYOUT, unsized_layout(2, 1, 4, false));
6361
6362        #[derive(KnownLayout)]
6363        #[repr(C)]
6364        struct KLF4(NotKnownLayout<u8>, NotKnownLayout<AU16>, NotKnownLayout<AU32>, [u8]);
6365
6366        assert_eq!(<KLF4 as KnownLayout>::LAYOUT, unsized_layout(4, 1, 8, false));
6367    }
6368
6369    #[test]
6370    fn test_object_safety() {
6371        fn _takes_immutable(_: &dyn Immutable) {}
6372        fn _takes_unaligned(_: &dyn Unaligned) {}
6373    }
6374
6375    #[test]
6376    fn test_from_zeros_only() {
6377        // Test types that implement `FromZeros` but not `FromBytes`.
6378
6379        assert!(!bool::new_zeroed());
6380        assert_eq!(char::new_zeroed(), '\0');
6381
6382        #[cfg(feature = "alloc")]
6383        {
6384            assert_eq!(bool::new_box_zeroed(), Ok(Box::new(false)));
6385            assert_eq!(char::new_box_zeroed(), Ok(Box::new('\0')));
6386
6387            assert_eq!(
6388                <[bool]>::new_box_zeroed_with_elems(3).unwrap().as_ref(),
6389                [false, false, false]
6390            );
6391            assert_eq!(
6392                <[char]>::new_box_zeroed_with_elems(3).unwrap().as_ref(),
6393                ['\0', '\0', '\0']
6394            );
6395
6396            assert_eq!(bool::new_vec_zeroed(3).unwrap().as_ref(), [false, false, false]);
6397            assert_eq!(char::new_vec_zeroed(3).unwrap().as_ref(), ['\0', '\0', '\0']);
6398        }
6399
6400        let mut string = "hello".to_string();
6401        let s: &mut str = string.as_mut();
6402        assert_eq!(s, "hello");
6403        s.zero();
6404        assert_eq!(s, "\0\0\0\0\0");
6405    }
6406
6407    #[test]
6408    fn test_zst_count_preserved() {
6409        // Test that, when an explicit count is provided to for a type with a
6410        // ZST trailing slice element, that count is preserved. This is
6411        // important since, for such types, all element counts result in objects
6412        // of the same size, and so the correct behavior is ambiguous. However,
6413        // preserving the count as requested by the user is the behavior that we
6414        // document publicly.
6415
6416        // FromZeros methods
6417        #[cfg(feature = "alloc")]
6418        assert_eq!(<[()]>::new_box_zeroed_with_elems(3).unwrap().len(), 3);
6419        #[cfg(feature = "alloc")]
6420        assert_eq!(<()>::new_vec_zeroed(3).unwrap().len(), 3);
6421
6422        // FromBytes methods
6423        assert_eq!(<[()]>::ref_from_bytes_with_elems(&[][..], 3).unwrap().len(), 3);
6424        assert_eq!(<[()]>::ref_from_prefix_with_elems(&[][..], 3).unwrap().0.len(), 3);
6425        assert_eq!(<[()]>::ref_from_suffix_with_elems(&[][..], 3).unwrap().1.len(), 3);
6426        assert_eq!(<[()]>::mut_from_bytes_with_elems(&mut [][..], 3).unwrap().len(), 3);
6427        assert_eq!(<[()]>::mut_from_prefix_with_elems(&mut [][..], 3).unwrap().0.len(), 3);
6428        assert_eq!(<[()]>::mut_from_suffix_with_elems(&mut [][..], 3).unwrap().1.len(), 3);
6429    }
6430
6431    #[test]
6432    fn test_read_write() {
6433        const VAL: u64 = 0x12345678;
6434        #[cfg(target_endian = "big")]
6435        const VAL_BYTES: [u8; 8] = VAL.to_be_bytes();
6436        #[cfg(target_endian = "little")]
6437        const VAL_BYTES: [u8; 8] = VAL.to_le_bytes();
6438        const ZEROS: [u8; 8] = [0u8; 8];
6439
6440        // Test `FromBytes::{read_from, read_from_prefix, read_from_suffix}`.
6441
6442        assert_eq!(u64::read_from_bytes(&VAL_BYTES[..]), Ok(VAL));
6443        // The first 8 bytes are from `VAL_BYTES` and the second 8 bytes are all
6444        // zeros.
6445        let bytes_with_prefix: [u8; 16] = transmute!([VAL_BYTES, [0; 8]]);
6446        assert_eq!(u64::read_from_prefix(&bytes_with_prefix[..]), Ok((VAL, &ZEROS[..])));
6447        assert_eq!(u64::read_from_suffix(&bytes_with_prefix[..]), Ok((&VAL_BYTES[..], 0)));
6448        // The first 8 bytes are all zeros and the second 8 bytes are from
6449        // `VAL_BYTES`
6450        let bytes_with_suffix: [u8; 16] = transmute!([[0; 8], VAL_BYTES]);
6451        assert_eq!(u64::read_from_prefix(&bytes_with_suffix[..]), Ok((0, &VAL_BYTES[..])));
6452        assert_eq!(u64::read_from_suffix(&bytes_with_suffix[..]), Ok((&ZEROS[..], VAL)));
6453
6454        // Test `IntoBytes::{write_to, write_to_prefix, write_to_suffix}`.
6455
6456        let mut bytes = [0u8; 8];
6457        assert_eq!(VAL.write_to(&mut bytes[..]), Ok(()));
6458        assert_eq!(bytes, VAL_BYTES);
6459        let mut bytes = [0u8; 16];
6460        assert_eq!(VAL.write_to_prefix(&mut bytes[..]), Ok(()));
6461        let want: [u8; 16] = transmute!([VAL_BYTES, [0; 8]]);
6462        assert_eq!(bytes, want);
6463        let mut bytes = [0u8; 16];
6464        assert_eq!(VAL.write_to_suffix(&mut bytes[..]), Ok(()));
6465        let want: [u8; 16] = transmute!([[0; 8], VAL_BYTES]);
6466        assert_eq!(bytes, want);
6467    }
6468
    #[test]
    #[cfg(feature = "std")]
    fn test_read_io_with_padding_soundness() {
        // This test is designed to exhibit potential UB in
        // `FromBytes::read_from_io`. (see #2319, #2320).

        // On most platforms (where `align_of::<u16>() == 2`), `WithPadding`
        // will have inter-field padding between `x` and `y`.
        #[derive(FromBytes)]
        #[repr(C)]
        struct WithPadding {
            x: u8,
            y: u16,
        }
        // A `Read` impl that reads from every byte of `buf` before reporting
        // the whole buffer as filled. If `read_from_io` were to pass a buffer
        // overlapping `WithPadding`'s uninitialized padding, the reads below
        // would be UB (and caught under Miri).
        struct ReadsInRead;
        impl std::io::Read for ReadsInRead {
            fn read(&mut self, buf: &mut [u8]) -> std::io::Result<usize> {
                // This body branches on every byte of `buf`, ensuring that it
                // exhibits UB if any byte of `buf` is uninitialized.
                if buf.iter().all(|&x| x == 0) {
                    Ok(buf.len())
                } else {
                    buf.iter_mut().for_each(|x| *x = 0);
                    Ok(buf.len())
                }
            }
        }
        // Either branch above leaves `buf` all zeros, so the parse must
        // succeed with zeroed fields.
        assert!(matches!(WithPadding::read_from_io(ReadsInRead), Ok(WithPadding { x: 0, y: 0 })));
    }
6498
6499    #[test]
6500    #[cfg(feature = "std")]
6501    fn test_read_write_io() {
6502        let mut long_buffer = [0, 0, 0, 0];
6503        assert!(matches!(u16::MAX.write_to_io(&mut long_buffer[..]), Ok(())));
6504        assert_eq!(long_buffer, [255, 255, 0, 0]);
6505        assert!(matches!(u16::read_from_io(&long_buffer[..]), Ok(u16::MAX)));
6506
6507        let mut short_buffer = [0, 0];
6508        assert!(u32::MAX.write_to_io(&mut short_buffer[..]).is_err());
6509        assert_eq!(short_buffer, [255, 255]);
6510        assert!(u32::read_from_io(&short_buffer[..]).is_err());
6511    }
6512
6513    #[test]
6514    fn test_try_from_bytes_try_read_from() {
6515        assert_eq!(<bool as TryFromBytes>::try_read_from_bytes(&[0]), Ok(false));
6516        assert_eq!(<bool as TryFromBytes>::try_read_from_bytes(&[1]), Ok(true));
6517
6518        assert_eq!(<bool as TryFromBytes>::try_read_from_prefix(&[0, 2]), Ok((false, &[2][..])));
6519        assert_eq!(<bool as TryFromBytes>::try_read_from_prefix(&[1, 2]), Ok((true, &[2][..])));
6520
6521        assert_eq!(<bool as TryFromBytes>::try_read_from_suffix(&[2, 0]), Ok((&[2][..], false)));
6522        assert_eq!(<bool as TryFromBytes>::try_read_from_suffix(&[2, 1]), Ok((&[2][..], true)));
6523
6524        // If we don't pass enough bytes, it fails.
6525        assert!(matches!(
6526            <u8 as TryFromBytes>::try_read_from_bytes(&[]),
6527            Err(TryReadError::Size(_))
6528        ));
6529        assert!(matches!(
6530            <u8 as TryFromBytes>::try_read_from_prefix(&[]),
6531            Err(TryReadError::Size(_))
6532        ));
6533        assert!(matches!(
6534            <u8 as TryFromBytes>::try_read_from_suffix(&[]),
6535            Err(TryReadError::Size(_))
6536        ));
6537
6538        // If we pass too many bytes, it fails.
6539        assert!(matches!(
6540            <u8 as TryFromBytes>::try_read_from_bytes(&[0, 0]),
6541            Err(TryReadError::Size(_))
6542        ));
6543
6544        // If we pass an invalid value, it fails.
6545        assert!(matches!(
6546            <bool as TryFromBytes>::try_read_from_bytes(&[2]),
6547            Err(TryReadError::Validity(_))
6548        ));
6549        assert!(matches!(
6550            <bool as TryFromBytes>::try_read_from_prefix(&[2, 0]),
6551            Err(TryReadError::Validity(_))
6552        ));
6553        assert!(matches!(
6554            <bool as TryFromBytes>::try_read_from_suffix(&[0, 2]),
6555            Err(TryReadError::Validity(_))
6556        ));
6557
6558        // Reading from a misaligned buffer should still succeed. Since `AU64`'s
6559        // alignment is 8, and since we read from two adjacent addresses one
6560        // byte apart, it is guaranteed that at least one of them (though
6561        // possibly both) will be misaligned.
6562        let bytes: [u8; 9] = [0, 0, 0, 0, 0, 0, 0, 0, 0];
6563        assert_eq!(<AU64 as TryFromBytes>::try_read_from_bytes(&bytes[..8]), Ok(AU64(0)));
6564        assert_eq!(<AU64 as TryFromBytes>::try_read_from_bytes(&bytes[1..9]), Ok(AU64(0)));
6565
6566        assert_eq!(
6567            <AU64 as TryFromBytes>::try_read_from_prefix(&bytes[..8]),
6568            Ok((AU64(0), &[][..]))
6569        );
6570        assert_eq!(
6571            <AU64 as TryFromBytes>::try_read_from_prefix(&bytes[1..9]),
6572            Ok((AU64(0), &[][..]))
6573        );
6574
6575        assert_eq!(
6576            <AU64 as TryFromBytes>::try_read_from_suffix(&bytes[..8]),
6577            Ok((&[][..], AU64(0)))
6578        );
6579        assert_eq!(
6580            <AU64 as TryFromBytes>::try_read_from_suffix(&bytes[1..9]),
6581            Ok((&[][..], AU64(0)))
6582        );
6583    }
6584
    #[test]
    fn test_ref_from_mut_from_bytes() {
        // Test `FromBytes::{ref_from_bytes, mut_from_bytes}{,_prefix,Suffix}`
        // success cases. Exhaustive coverage for these methods is covered by
        // the `Ref` tests above, which these helper methods defer to.
        //
        // NOTE: the assertions below are order-dependent — each write through
        // a `mut_from_*` reference changes the buffer state that subsequent
        // assertions observe.

        // 16 bytes, aligned for `AU64` (8-byte alignment).
        let mut buf =
            Align::<[u8; 16], AU64>::new([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]);

        // The trailing 8 bytes reinterpret as an `AU64` in native byte order.
        assert_eq!(
            AU64::ref_from_bytes(&buf.t[8..]).unwrap().0.to_ne_bytes(),
            [8, 9, 10, 11, 12, 13, 14, 15]
        );
        // Overwrite bytes 8..16 with 0x01 each.
        let suffix = AU64::mut_from_bytes(&mut buf.t[8..]).unwrap();
        suffix.0 = 0x0101010101010101;
        // The `[u8:9]` is a non-half size of the full buffer, which would catch
        // `from_prefix` having the same implementation as `from_suffix` (issues #506, #511).
        assert_eq!(
            <[u8; 9]>::ref_from_suffix(&buf.t[..]).unwrap(),
            (&[0, 1, 2, 3, 4, 5, 6][..], &[7u8, 1, 1, 1, 1, 1, 1, 1, 1])
        );
        // Overwrite bytes 8..16 (the aligned 8-byte suffix of `buf.t[1..]`)
        // with 0x02 each; the remaining prefix is bytes 1..8.
        let (prefix, suffix) = AU64::mut_from_suffix(&mut buf.t[1..]).unwrap();
        assert_eq!(prefix, &mut [1u8, 2, 3, 4, 5, 6, 7][..]);
        suffix.0 = 0x0202020202020202;
        // Take the trailing 10 bytes (indices 6..16); set byte 6 to 42.
        let (prefix, suffix) = <[u8; 10]>::mut_from_suffix(&mut buf.t[..]).unwrap();
        assert_eq!(prefix, &mut [0u8, 1, 2, 3, 4, 5][..]);
        suffix[0] = 42;
        assert_eq!(
            <[u8; 9]>::ref_from_prefix(&buf.t[..]).unwrap(),
            (&[0u8, 1, 2, 3, 4, 5, 42, 7, 2], &[2u8, 2, 2, 2, 2, 2, 2][..])
        );
        // Set byte 1 to 30 via a 2-byte prefix, then check the final state of
        // the whole buffer.
        <[u8; 2]>::mut_from_prefix(&mut buf.t[..]).unwrap().0[1] = 30;
        assert_eq!(buf.t, [0, 30, 2, 3, 4, 5, 42, 7, 2, 2, 2, 2, 2, 2, 2, 2]);
    }
6619
6620    #[test]
6621    fn test_ref_from_mut_from_bytes_error() {
6622        // Test `FromBytes::{ref_from_bytes, mut_from_bytes}{,_prefix,Suffix}`
6623        // error cases.
6624
6625        // Fail because the buffer is too large.
6626        let mut buf = Align::<[u8; 16], AU64>::default();
6627        // `buf.t` should be aligned to 8, so only the length check should fail.
6628        assert!(AU64::ref_from_bytes(&buf.t[..]).is_err());
6629        assert!(AU64::mut_from_bytes(&mut buf.t[..]).is_err());
6630        assert!(<[u8; 8]>::ref_from_bytes(&buf.t[..]).is_err());
6631        assert!(<[u8; 8]>::mut_from_bytes(&mut buf.t[..]).is_err());
6632
6633        // Fail because the buffer is too small.
6634        let mut buf = Align::<[u8; 4], AU64>::default();
6635        assert!(AU64::ref_from_bytes(&buf.t[..]).is_err());
6636        assert!(AU64::mut_from_bytes(&mut buf.t[..]).is_err());
6637        assert!(<[u8; 8]>::ref_from_bytes(&buf.t[..]).is_err());
6638        assert!(<[u8; 8]>::mut_from_bytes(&mut buf.t[..]).is_err());
6639        assert!(AU64::ref_from_prefix(&buf.t[..]).is_err());
6640        assert!(AU64::mut_from_prefix(&mut buf.t[..]).is_err());
6641        assert!(AU64::ref_from_suffix(&buf.t[..]).is_err());
6642        assert!(AU64::mut_from_suffix(&mut buf.t[..]).is_err());
6643        assert!(<[u8; 8]>::ref_from_prefix(&buf.t[..]).is_err());
6644        assert!(<[u8; 8]>::mut_from_prefix(&mut buf.t[..]).is_err());
6645        assert!(<[u8; 8]>::ref_from_suffix(&buf.t[..]).is_err());
6646        assert!(<[u8; 8]>::mut_from_suffix(&mut buf.t[..]).is_err());
6647
6648        // Fail because the alignment is insufficient.
6649        let mut buf = Align::<[u8; 13], AU64>::default();
6650        assert!(AU64::ref_from_bytes(&buf.t[1..]).is_err());
6651        assert!(AU64::mut_from_bytes(&mut buf.t[1..]).is_err());
6652        assert!(AU64::ref_from_bytes(&buf.t[1..]).is_err());
6653        assert!(AU64::mut_from_bytes(&mut buf.t[1..]).is_err());
6654        assert!(AU64::ref_from_prefix(&buf.t[1..]).is_err());
6655        assert!(AU64::mut_from_prefix(&mut buf.t[1..]).is_err());
6656        assert!(AU64::ref_from_suffix(&buf.t[..]).is_err());
6657        assert!(AU64::mut_from_suffix(&mut buf.t[..]).is_err());
6658    }
6659
    #[test]
    fn test_to_methods() {
        /// Run a series of tests by calling `IntoBytes` methods on `t`.
        ///
        /// `bytes` is the expected byte sequence returned from `t.as_bytes()`
        /// before `t` has been modified. `post_mutation` is the expected
        /// sequence returned from `t.as_bytes()` after `t.as_mut_bytes()[0]`
        /// has had its bits flipped (by applying `^= 0xFF`).
        ///
        /// `N` is the size of `t` in bytes.
        fn test<T: FromBytes + IntoBytes + Immutable + Debug + Eq + ?Sized, const N: usize>(
            t: &mut T,
            bytes: &[u8],
            post_mutation: &T,
        ) {
            // Test that we can access the underlying bytes, and that we get the
            // right bytes and the right number of bytes.
            assert_eq!(t.as_bytes(), bytes);

            // Test that changes to the underlying byte slices are reflected in
            // the original object.
            t.as_mut_bytes()[0] ^= 0xFF;
            assert_eq!(t, post_mutation);
            // Flip the first byte back so `t` is restored for the remaining
            // checks.
            t.as_mut_bytes()[0] ^= 0xFF;

            // `write_to` rejects slices that are too small or too large.
            assert!(t.write_to(&mut vec![0; N - 1][..]).is_err());
            assert!(t.write_to(&mut vec![0; N + 1][..]).is_err());

            // `write_to` works as expected.
            let mut bytes = [0; N];
            assert_eq!(t.write_to(&mut bytes[..]), Ok(()));
            assert_eq!(bytes, t.as_bytes());

            // `write_to_prefix` rejects slices that are too small.
            assert!(t.write_to_prefix(&mut vec![0; N - 1][..]).is_err());

            // `write_to_prefix` works with exact-sized slices.
            let mut bytes = [0; N];
            assert_eq!(t.write_to_prefix(&mut bytes[..]), Ok(()));
            assert_eq!(bytes, t.as_bytes());

            // `write_to_prefix` works with too-large slices, and any bytes past
            // the prefix aren't modified.
            let mut too_many_bytes = vec![0; N + 1];
            too_many_bytes[N] = 123;
            assert_eq!(t.write_to_prefix(&mut too_many_bytes[..]), Ok(()));
            assert_eq!(&too_many_bytes[..N], t.as_bytes());
            assert_eq!(too_many_bytes[N], 123);

            // `write_to_suffix` rejects slices that are too small.
            assert!(t.write_to_suffix(&mut vec![0; N - 1][..]).is_err());

            // `write_to_suffix` works with exact-sized slices.
            let mut bytes = [0; N];
            assert_eq!(t.write_to_suffix(&mut bytes[..]), Ok(()));
            assert_eq!(bytes, t.as_bytes());

            // `write_to_suffix` works with too-large slices, and any bytes
            // before the suffix aren't modified.
            let mut too_many_bytes = vec![0; N + 1];
            too_many_bytes[0] = 123;
            assert_eq!(t.write_to_suffix(&mut too_many_bytes[..]), Ok(()));
            assert_eq!(&too_many_bytes[1..], t.as_bytes());
            assert_eq!(too_many_bytes[0], 123);
        }

        // A 12-byte `repr(C)` struct; `c: None` serializes as four zero bytes
        // (see `expected_bytes` below).
        #[derive(Debug, Eq, PartialEq, FromBytes, IntoBytes, Immutable)]
        #[repr(C)]
        struct Foo {
            a: u32,
            b: Wrapping<u32>,
            c: Option<NonZeroU32>,
        }

        let expected_bytes: Vec<u8> = if cfg!(target_endian = "little") {
            vec![1, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0]
        } else {
            vec![0, 0, 0, 1, 0, 0, 0, 2, 0, 0, 0, 0]
        };
        // Flipping byte 0 of `a == 1` flips the least-significant byte on
        // little-endian (0x01 -> 0xFE) and the most-significant byte on
        // big-endian (0x00 -> 0xFF).
        let post_mutation_expected_a =
            if cfg!(target_endian = "little") { 0x00_00_00_FE } else { 0xFF_00_00_01 };
        test::<_, 12>(
            &mut Foo { a: 1, b: Wrapping(2), c: None },
            expected_bytes.as_bytes(),
            &Foo { a: post_mutation_expected_a, b: Wrapping(2), c: None },
        );
        // Also exercise the `?Sized` path with a slice-based DST.
        test::<_, 3>(
            Unsized::from_mut_slice(&mut [1, 2, 3]),
            &[1, 2, 3],
            Unsized::from_mut_slice(&mut [0xFE, 2, 3]),
        );
    }
6753
6754    #[test]
6755    fn test_array() {
6756        #[derive(FromBytes, IntoBytes, Immutable)]
6757        #[repr(C)]
6758        struct Foo {
6759            a: [u16; 33],
6760        }
6761
6762        let foo = Foo { a: [0xFFFF; 33] };
6763        let expected = [0xFFu8; 66];
6764        assert_eq!(foo.as_bytes(), &expected[..]);
6765    }
6766
6767    #[test]
6768    fn test_new_zeroed() {
6769        assert!(!bool::new_zeroed());
6770        assert_eq!(u64::new_zeroed(), 0);
6771        // This test exists in order to exercise unsafe code, especially when
6772        // running under Miri.
6773        #[allow(clippy::unit_cmp)]
6774        {
6775            assert_eq!(<()>::new_zeroed(), ());
6776        }
6777    }
6778
    #[test]
    fn test_transparent_packed_generic_struct() {
        // A `repr(transparent)` struct with a generic field: the derives must
        // accept it, and the derived traits hold when the field type provides
        // them.
        #[derive(IntoBytes, FromBytes, Unaligned)]
        #[repr(transparent)]
        #[allow(dead_code)] // We never construct this type
        struct Foo<T> {
            _t: T,
            _phantom: PhantomData<()>,
        }

        assert_impl_all!(Foo<u32>: FromZeros, FromBytes, IntoBytes);
        // `Unaligned` is only asserted for an unaligned field type (`u8`).
        assert_impl_all!(Foo<u8>: Unaligned);

        // A `repr(C, packed)` struct with generic fields: packing removes
        // alignment requirements, so `Unaligned` holds even with the aligned
        // `AU64` field.
        #[derive(IntoBytes, FromBytes, Unaligned)]
        #[repr(C, packed)]
        #[allow(dead_code)] // We never construct this type
        struct Bar<T, U> {
            _t: T,
            _u: U,
        }

        assert_impl_all!(Bar<u8, AU64>: FromZeros, FromBytes, IntoBytes, Unaligned);
    }
6802
6803    #[cfg(feature = "alloc")]
6804    mod alloc {
6805        use super::*;
6806
6807        #[cfg(not(no_zerocopy_panic_in_const_and_vec_try_reserve_1_57_0))]
6808        #[test]
6809        fn test_extend_vec_zeroed() {
6810            // Test extending when there is an existing allocation.
6811            let mut v = vec![100u16, 200, 300];
6812            FromZeros::extend_vec_zeroed(&mut v, 3).unwrap();
6813            assert_eq!(v.len(), 6);
6814            assert_eq!(&*v, &[100, 200, 300, 0, 0, 0]);
6815            drop(v);
6816
6817            // Test extending when there is no existing allocation.
6818            let mut v: Vec<u64> = Vec::new();
6819            FromZeros::extend_vec_zeroed(&mut v, 3).unwrap();
6820            assert_eq!(v.len(), 3);
6821            assert_eq!(&*v, &[0, 0, 0]);
6822            drop(v);
6823        }
6824
6825        #[cfg(not(no_zerocopy_panic_in_const_and_vec_try_reserve_1_57_0))]
6826        #[test]
6827        fn test_extend_vec_zeroed_zst() {
6828            // Test extending when there is an existing (fake) allocation.
6829            let mut v = vec![(), (), ()];
6830            <()>::extend_vec_zeroed(&mut v, 3).unwrap();
6831            assert_eq!(v.len(), 6);
6832            assert_eq!(&*v, &[(), (), (), (), (), ()]);
6833            drop(v);
6834
6835            // Test extending when there is no existing (fake) allocation.
6836            let mut v: Vec<()> = Vec::new();
6837            <()>::extend_vec_zeroed(&mut v, 3).unwrap();
6838            assert_eq!(&*v, &[(), (), ()]);
6839            drop(v);
6840        }
6841
6842        #[cfg(not(no_zerocopy_panic_in_const_and_vec_try_reserve_1_57_0))]
6843        #[test]
6844        fn test_insert_vec_zeroed() {
6845            // Insert at start (no existing allocation).
6846            let mut v: Vec<u64> = Vec::new();
6847            u64::insert_vec_zeroed(&mut v, 0, 2).unwrap();
6848            assert_eq!(v.len(), 2);
6849            assert_eq!(&*v, &[0, 0]);
6850            drop(v);
6851
6852            // Insert at start.
6853            let mut v = vec![100u64, 200, 300];
6854            u64::insert_vec_zeroed(&mut v, 0, 2).unwrap();
6855            assert_eq!(v.len(), 5);
6856            assert_eq!(&*v, &[0, 0, 100, 200, 300]);
6857            drop(v);
6858
6859            // Insert at middle.
6860            let mut v = vec![100u64, 200, 300];
6861            u64::insert_vec_zeroed(&mut v, 1, 1).unwrap();
6862            assert_eq!(v.len(), 4);
6863            assert_eq!(&*v, &[100, 0, 200, 300]);
6864            drop(v);
6865
6866            // Insert at end.
6867            let mut v = vec![100u64, 200, 300];
6868            u64::insert_vec_zeroed(&mut v, 3, 1).unwrap();
6869            assert_eq!(v.len(), 4);
6870            assert_eq!(&*v, &[100, 200, 300, 0]);
6871            drop(v);
6872        }
6873
6874        #[cfg(not(no_zerocopy_panic_in_const_and_vec_try_reserve_1_57_0))]
6875        #[test]
6876        fn test_insert_vec_zeroed_zst() {
6877            // Insert at start (no existing fake allocation).
6878            let mut v: Vec<()> = Vec::new();
6879            <()>::insert_vec_zeroed(&mut v, 0, 2).unwrap();
6880            assert_eq!(v.len(), 2);
6881            assert_eq!(&*v, &[(), ()]);
6882            drop(v);
6883
6884            // Insert at start.
6885            let mut v = vec![(), (), ()];
6886            <()>::insert_vec_zeroed(&mut v, 0, 2).unwrap();
6887            assert_eq!(v.len(), 5);
6888            assert_eq!(&*v, &[(), (), (), (), ()]);
6889            drop(v);
6890
6891            // Insert at middle.
6892            let mut v = vec![(), (), ()];
6893            <()>::insert_vec_zeroed(&mut v, 1, 1).unwrap();
6894            assert_eq!(v.len(), 4);
6895            assert_eq!(&*v, &[(), (), (), ()]);
6896            drop(v);
6897
6898            // Insert at end.
6899            let mut v = vec![(), (), ()];
6900            <()>::insert_vec_zeroed(&mut v, 3, 1).unwrap();
6901            assert_eq!(v.len(), 4);
6902            assert_eq!(&*v, &[(), (), (), ()]);
6903            drop(v);
6904        }
6905
6906        #[test]
6907        fn test_new_box_zeroed() {
6908            assert_eq!(u64::new_box_zeroed(), Ok(Box::new(0)));
6909        }
6910
6911        #[test]
6912        fn test_new_box_zeroed_array() {
6913            drop(<[u32; 0x1000]>::new_box_zeroed());
6914        }
6915
6916        #[test]
6917        fn test_new_box_zeroed_zst() {
6918            // This test exists in order to exercise unsafe code, especially
6919            // when running under Miri.
6920            #[allow(clippy::unit_cmp)]
6921            {
6922                assert_eq!(<()>::new_box_zeroed(), Ok(Box::new(())));
6923            }
6924        }
6925
6926        #[test]
6927        fn test_new_box_zeroed_with_elems() {
6928            let mut s: Box<[u64]> = <[u64]>::new_box_zeroed_with_elems(3).unwrap();
6929            assert_eq!(s.len(), 3);
6930            assert_eq!(&*s, &[0, 0, 0]);
6931            s[1] = 3;
6932            assert_eq!(&*s, &[0, 3, 0]);
6933        }
6934
6935        #[test]
6936        fn test_new_box_zeroed_with_elems_empty() {
6937            let s: Box<[u64]> = <[u64]>::new_box_zeroed_with_elems(0).unwrap();
6938            assert_eq!(s.len(), 0);
6939        }
6940
6941        #[test]
6942        fn test_new_box_zeroed_with_elems_zst() {
6943            let mut s: Box<[()]> = <[()]>::new_box_zeroed_with_elems(3).unwrap();
6944            assert_eq!(s.len(), 3);
6945            assert!(s.get(10).is_none());
6946            // This test exists in order to exercise unsafe code, especially
6947            // when running under Miri.
6948            #[allow(clippy::unit_cmp)]
6949            {
6950                assert_eq!(s[1], ());
6951            }
6952            s[2] = ();
6953        }
6954
6955        #[test]
6956        fn test_new_box_zeroed_with_elems_zst_empty() {
6957            let s: Box<[()]> = <[()]>::new_box_zeroed_with_elems(0).unwrap();
6958            assert_eq!(s.len(), 0);
6959        }
6960
6961        #[test]
6962        fn new_box_zeroed_with_elems_errors() {
6963            assert_eq!(<[u16]>::new_box_zeroed_with_elems(usize::MAX), Err(AllocError));
6964
6965            let max = <usize as core::convert::TryFrom<_>>::try_from(isize::MAX).unwrap();
6966            assert_eq!(
6967                <[u16]>::new_box_zeroed_with_elems((max / mem::size_of::<u16>()) + 1),
6968                Err(AllocError)
6969            );
6970        }
6971    }
6972
6973    #[test]
6974    #[allow(deprecated)]
6975    fn test_deprecated_from_bytes() {
6976        let val = 0u32;
6977        let bytes = val.as_bytes();
6978
6979        assert!(u32::ref_from(bytes).is_some());
6980        // mut_from needs mut bytes
6981        let mut val = 0u32;
6982        let mut_bytes = val.as_mut_bytes();
6983        assert!(u32::mut_from(mut_bytes).is_some());
6984
6985        assert!(u32::read_from(bytes).is_some());
6986
6987        let (slc, rest) = <u32>::slice_from_prefix(bytes, 0).unwrap();
6988        assert!(slc.is_empty());
6989        assert_eq!(rest.len(), 4);
6990
6991        let (rest, slc) = <u32>::slice_from_suffix(bytes, 0).unwrap();
6992        assert!(slc.is_empty());
6993        assert_eq!(rest.len(), 4);
6994
6995        let (slc, rest) = <u32>::mut_slice_from_prefix(mut_bytes, 0).unwrap();
6996        assert!(slc.is_empty());
6997        assert_eq!(rest.len(), 4);
6998
6999        let (rest, slc) = <u32>::mut_slice_from_suffix(mut_bytes, 0).unwrap();
7000        assert!(slc.is_empty());
7001        assert_eq!(rest.len(), 4);
7002    }
7003
7004    #[test]
7005    fn test_try_ref_from_prefix_suffix() {
7006        use crate::util::testutil::Align;
7007        let bytes = &Align::<[u8; 4], u32>::new([0u8; 4]).t[..];
7008        let (r, rest): (&u32, &[u8]) = u32::try_ref_from_prefix(bytes).unwrap();
7009        assert_eq!(*r, 0);
7010        assert_eq!(rest.len(), 0);
7011
7012        let (rest, r): (&[u8], &u32) = u32::try_ref_from_suffix(bytes).unwrap();
7013        assert_eq!(*r, 0);
7014        assert_eq!(rest.len(), 0);
7015    }
7016
7017    #[test]
7018    fn test_raw_dangling() {
7019        use crate::util::AsAddress;
7020        let ptr: NonNull<u32> = u32::raw_dangling();
7021        assert_eq!(AsAddress::addr(ptr), 1);
7022
7023        let ptr: NonNull<[u32]> = <[u32]>::raw_dangling();
7024        assert_eq!(AsAddress::addr(ptr), 1);
7025    }
7026
7027    #[test]
7028    fn test_try_ref_from_prefix_with_elems() {
7029        use crate::util::testutil::Align;
7030        let bytes = &Align::<[u8; 8], u32>::new([0u8; 8]).t[..];
7031        let (r, rest): (&[u32], &[u8]) = <[u32]>::try_ref_from_prefix_with_elems(bytes, 2).unwrap();
7032        assert_eq!(r.len(), 2);
7033        assert_eq!(rest.len(), 0);
7034    }
7035
7036    #[test]
7037    fn test_try_ref_from_suffix_with_elems() {
7038        use crate::util::testutil::Align;
7039        let bytes = &Align::<[u8; 8], u32>::new([0u8; 8]).t[..];
7040        let (rest, r): (&[u8], &[u32]) = <[u32]>::try_ref_from_suffix_with_elems(bytes, 2).unwrap();
7041        assert_eq!(r.len(), 2);
7042        assert_eq!(rest.len(), 0);
7043    }
7044}