netstack3_ip/
reassembly.rs

1// Copyright 2019 The Fuchsia Authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
5//! Module for IP fragmented packet reassembly support.
6//!
7//! `reassembly` is a utility to support reassembly of fragmented IP packets.
8//! Fragmented packets are associated by a combination of the packets' source
9//! address, destination address and identification value. When a potentially
10//! fragmented packet is received, this utility will check to see if the packet
11//! is in fact fragmented or not. If it isn't fragmented, it will be returned as
12//! is without any modification. If it is fragmented, this utility will capture
13//! its body and store it in a cache while waiting for all the fragments for a
14//! packet to arrive. The header information from a fragment with offset set to
15//! 0 will also be kept to add to the final, reassembled packet. Once this
16//! utility has received all the fragments for a combination of source address,
17//! destination address and identification value, the implementer will need to
18//! allocate a buffer of sufficient size to reassemble the final packet into and
19//! pass it to this utility. This utility will then attempt to reassemble and
20//! parse the packet, which will be returned to the caller. The caller should
21//! then handle the returned packet as a normal IP packet. Note, there is a
22//! timer from receipt of the first fragment to reassembly of the final packet.
23//! See [`REASSEMBLY_TIMEOUT_SECONDS`].
24//!
25//! Note, this utility does not support reassembly of jumbogram packets.
26//! According to the IPv6 Jumbogram RFC (RFC 2675), the jumbogram payload option
27//! is relevant only for nodes that may be attached to links with a link MTU
28//! greater than 65575 bytes. Note, the maximum size of a non-jumbogram IPv6
29//! packet is also 65575 (as the payload length field for IP packets is 16 bits
30//! + 40 byte IPv6 header). If a link supports an MTU greater than the maximum
31//! size of a non-jumbogram packet, the packet should not be fragmented.
32
33use alloc::collections::hash_map::{Entry, HashMap};
34use alloc::collections::{BTreeSet, BinaryHeap};
35use alloc::vec::Vec;
36use core::cmp::Ordering;
37use core::time::Duration;
38
39use assert_matches::assert_matches;
40use log::debug;
41use net_types::ip::{GenericOverIp, Ip, IpAddr, IpAddress, IpVersionMarker};
42use netstack3_base::{
43    CoreTimerContext, HandleableTimer, InstantBindingsTypes, IpExt, LocalTimerHeap,
44    TimerBindingsTypes, TimerContext,
45};
46use packet::BufferViewMut;
47use packet_formats::ip::IpPacket;
48use packet_formats::ipv4::{Ipv4Header, Ipv4Packet};
49use packet_formats::ipv6::ext_hdrs::Ipv6ExtensionHeaderData;
50use packet_formats::ipv6::Ipv6Packet;
51use zerocopy::{SplitByteSlice, SplitByteSliceMut};
52
/// The maximum amount of time from receipt of the first fragment to reassembly
/// of a packet. Note, "first fragment" does not mean a fragment with offset 0;
/// it means the first fragment packet we receive with a new combination of
/// source address, destination address and fragment identification value.
const REASSEMBLY_TIMEOUT: Duration = Duration::from_secs(60);

/// Number of bytes per fragment block for IPv4 and IPv6.
///
/// IPv4 outlines the fragment block size in RFC 791 section 3.1, under the
/// fragment offset field's description: "The fragment offset is measured in
/// units of 8 octets (64 bits)".
///
/// IPv6 outlines the fragment block size in RFC 8200 section 4.5, under the
/// fragment offset field's description: "The offset, in 8-octet units, of the
/// data following this header".
const FRAGMENT_BLOCK_SIZE: u8 = 8;

/// Maximum number of fragment blocks an IPv4 or IPv6 packet can have.
///
/// We use this value because both IPv4 fixed header's fragment offset field and
/// IPv6 fragment extension header's fragment offset field are 13 bits wide
/// (8191 == 2^13 - 1).
const MAX_FRAGMENT_BLOCKS: u16 = 8191;

/// Maximum number of bytes of all currently cached fragments per IP protocol.
///
/// If the current cache size is less than this number, a new fragment can be
/// cached (even if this will result in the total cache size exceeding this
/// threshold). If the current cache size >= this number, the incoming fragment
/// will be dropped.
const MAX_FRAGMENT_CACHE_SIZE: usize = 4 * 1024 * 1024;
83
/// The state context for the fragment cache.
pub trait FragmentContext<I: Ip, BT: FragmentBindingsTypes> {
    /// Calls `cb` with a mutable reference to the fragment cache and returns
    /// the callback's result.
    fn with_state_mut<O, F: FnOnce(&mut IpPacketFragmentCache<I, BT>) -> O>(&mut self, cb: F) -> O;
}
89
/// The bindings types for IP packet fragment reassembly.
///
/// Blanket-implemented for all types satisfying the supertrait bounds.
pub trait FragmentBindingsTypes: TimerBindingsTypes + InstantBindingsTypes {}
impl<BT> FragmentBindingsTypes for BT where BT: TimerBindingsTypes + InstantBindingsTypes {}

/// The bindings execution context for IP packet fragment reassembly.
///
/// Blanket-implemented for all types satisfying the supertrait bounds.
pub trait FragmentBindingsContext: TimerContext + FragmentBindingsTypes {}
impl<BC> FragmentBindingsContext for BC where BC: TimerContext + FragmentBindingsTypes {}
97
/// The timer ID for the fragment cache.
///
/// One timer ID per IP version; the per-entry reassembly deadlines are tracked
/// inside the cache's `LocalTimerHeap`, keyed by [`FragmentCacheKey`].
#[derive(Hash, Eq, PartialEq, Default, Clone, Debug, GenericOverIp)]
#[generic_over_ip(I, Ip)]
pub struct FragmentTimerId<I: Ip>(IpVersionMarker<I>);
102
/// An implementation of a fragment cache.
pub trait FragmentHandler<I: IpExt, BC> {
    /// Attempts to process a packet fragment.
    ///
    /// Returns a [`FragmentProcessingState`] describing whether the packet was
    /// not fragmented, was cached pending more fragments, was invalid, or is
    /// now ready for reassembly.
    ///
    /// # Panics
    ///
    /// Panics if the packet has no fragment data.
    fn process_fragment<B: SplitByteSlice>(
        &mut self,
        bindings_ctx: &mut BC,
        packet: I::Packet<B>,
    ) -> FragmentProcessingState<I, B>
    where
        I::Packet<B>: FragmentablePacket;

    /// Attempts to reassemble a packet.
    ///
    /// Attempts to reassemble a packet associated with a given
    /// `FragmentCacheKey`, `key`, and cancels the timer to reset reassembly
    /// data. The caller is expected to allocate a buffer of sufficient size
    /// (available from `process_fragment` when it returns a
    /// `FragmentProcessingState::Ready` value) and provide it to
    /// `reassemble_packet` as `buffer` where the packet will be reassembled
    /// into.
    ///
    /// # Panics
    ///
    /// Panics if the provided `buffer` does not have enough capacity for the
    /// reassembled packet. Also panics if a different `ctx` is passed to
    /// `reassemble_packet` from the one passed to `process_fragment` when
    /// processing a packet with a given `key` as `reassemble_packet` will fail
    /// to cancel the reassembly timer.
    fn reassemble_packet<B: SplitByteSliceMut, BV: BufferViewMut<B>>(
        &mut self,
        bindings_ctx: &mut BC,
        key: &FragmentCacheKey<I::Addr>,
        buffer: BV,
    ) -> Result<(), FragmentReassemblyError>;
}
142
143impl<I: IpExt, BC: FragmentBindingsContext, CC: FragmentContext<I, BC>> FragmentHandler<I, BC>
144    for CC
145{
146    fn process_fragment<B: SplitByteSlice>(
147        &mut self,
148        bindings_ctx: &mut BC,
149        packet: I::Packet<B>,
150    ) -> FragmentProcessingState<I, B>
151    where
152        I::Packet<B>: FragmentablePacket,
153    {
154        self.with_state_mut(|cache| {
155            let (res, timer_action) = cache.process_fragment(packet);
156
157            if let Some(timer_action) = timer_action {
158                match timer_action {
159                    CacheTimerAction::CreateNewTimer(key) => {
160                        assert_eq!(
161                            cache.timers.schedule_after(bindings_ctx, key, (), REASSEMBLY_TIMEOUT),
162                            None
163                        )
164                    }
165                    CacheTimerAction::CancelExistingTimer(key) => {
166                        assert_ne!(cache.timers.cancel(bindings_ctx, &key), None)
167                    }
168                }
169            }
170
171            res
172        })
173    }
174
175    fn reassemble_packet<B: SplitByteSliceMut, BV: BufferViewMut<B>>(
176        &mut self,
177        bindings_ctx: &mut BC,
178        key: &FragmentCacheKey<I::Addr>,
179        buffer: BV,
180    ) -> Result<(), FragmentReassemblyError> {
181        self.with_state_mut(|cache| {
182            let res = cache.reassemble_packet(key, buffer);
183
184            match res {
185                Ok(_) | Err(FragmentReassemblyError::PacketParsingError) => {
186                    // Cancel the reassembly timer as we attempt reassembly which
187                    // means we had all the fragments for the final packet, even
188                    // if parsing the reassembled packet failed.
189                    assert_matches!(cache.timers.cancel(bindings_ctx, key), Some(_));
190                }
191                Err(FragmentReassemblyError::InvalidKey)
192                | Err(FragmentReassemblyError::MissingFragments) => {}
193            }
194
195            res
196        })
197    }
198}
199
impl<I: IpExt, BC: FragmentBindingsContext, CC: FragmentContext<I, BC>> HandleableTimer<CC, BC>
    for FragmentTimerId<I>
{
    // Fires when a reassembly deadline in the cache's timer heap expires; the
    // expired entry's partially-reassembled data is discarded.
    fn handle(self, core_ctx: &mut CC, bindings_ctx: &mut BC, _: BC::UniqueTimerId) {
        let Self(IpVersionMarker { .. }) = self;
        core_ctx.with_state_mut(|cache| {
            // `pop` yields the cache key whose local timer fired, if any.
            let Some((key, ())) = cache.timers.pop(bindings_ctx) else {
                return;
            };

            // If a timer fired, the `key` must still exist in our fragment cache.
            let FragmentCacheData { missing_blocks: _, body_fragments, header: _, total_size } =
                assert_matches!(cache.remove_data(&key), Some(c) => c);
            debug!(
                "reassembly for {key:?} \
                timed out with {} fragments and {total_size} bytes",
                body_fragments.len(),
            );
        });
    }
}
221
222/// Trait that must be implemented by any packet type that is fragmentable.
/// Trait that must be implemented by any packet type that is fragmentable.
pub trait FragmentablePacket {
    /// Return fragment identifier data.
    ///
    /// Returns the fragment identification, offset and more flag as `(a, b, c)`
    /// where `a` is the fragment identification value, `b` is the fragment
    /// offset (in `FRAGMENT_BLOCK_SIZE`-byte units) and `c` is the more flag.
    ///
    /// # Panics
    ///
    /// Panics if the packet has no fragment data.
    fn fragment_data(&self) -> (u32, u16, bool);
}
235
236impl<B: SplitByteSlice> FragmentablePacket for Ipv4Packet<B> {
237    fn fragment_data(&self) -> (u32, u16, bool) {
238        (u32::from(self.id()), self.fragment_offset().into_raw(), self.mf_flag())
239    }
240}
241
242impl<B: SplitByteSlice> FragmentablePacket for Ipv6Packet<B> {
243    fn fragment_data(&self) -> (u32, u16, bool) {
244        for ext_hdr in self.iter_extension_hdrs() {
245            if let Ipv6ExtensionHeaderData::Fragment { fragment_data } = ext_hdr.data() {
246                return (
247                    fragment_data.identification(),
248                    fragment_data.fragment_offset().into_raw(),
249                    fragment_data.m_flag(),
250                );
251            }
252        }
253
254        unreachable!(
255            "Should never call this function if the packet does not have a fragment header"
256        );
257    }
258}
259
/// Possible return values for [`IpPacketFragmentCache::process_fragment`].
#[derive(Debug)]
pub enum FragmentProcessingState<I: IpExt, B: SplitByteSlice> {
    /// The provided packet is not fragmented so no processing is required.
    /// The packet is returned with this value without any modification.
    NotNeeded(I::Packet<B>),

    /// The provided packet is fragmented but it is malformed.
    ///
    /// Possible reasons for being malformed are:
    ///  1) Body is not a multiple of `FRAGMENT_BLOCK_SIZE` and it is not the
    ///     last fragment (last fragment of a packet, not last fragment received
    ///     for a packet).
    ///  2) Overlaps with an existing fragment. This is explicitly not allowed
    ///     for IPv6 as per RFC 8200 section 4.5 (more details in RFC 5722). We
    ///     choose the same behaviour for IPv4 for the same reasons.
    ///  3) Packet's fragment offset + # of fragment blocks >
    ///     `MAX_FRAGMENT_BLOCKS`.
    // TODO(ghanan): Investigate whether disallowing overlapping fragments for
    //               IPv4 cause issues interoperating with hosts that produce
    //               overlapping fragments.
    InvalidFragment,

    /// Successfully processed the provided fragment. We are still waiting on
    /// more fragments for a packet to arrive before being ready to reassemble
    /// the packet.
    NeedMoreFragments,

    /// Cannot process the fragment because `MAX_FRAGMENT_CACHE_SIZE` is
    /// reached.
    OutOfMemory,

    /// Successfully processed the provided fragment. We now have all the
    /// fragments we need to reassemble the packet. The caller must create a
    /// buffer with capacity for at least `packet_len` bytes and provide the
    /// buffer and `key` to `reassemble_packet`.
    Ready { key: FragmentCacheKey<I::Addr>, packet_len: usize },
}
298
/// Possible errors when attempting to reassemble a packet.
#[derive(Debug, PartialEq, Eq)]
pub enum FragmentReassemblyError {
    /// At least one fragment for a packet has not arrived.
    MissingFragments,

    /// A `FragmentCacheKey` is not associated with any packet. This could be
    /// because either no fragment has yet arrived for a packet associated with
    /// a `FragmentCacheKey` or some fragments did arrive, but the reassembly
    /// timer expired and the entry got discarded.
    InvalidKey,

    /// Parsing the reassembled packet failed.
    PacketParsingError,
}
314
315/// Fragment Cache Key.
316///
317/// Composed of the original packet's source address, destination address,
318/// and fragment id.
319#[derive(Copy, Clone, Debug, Hash, PartialEq, Eq)]
320pub struct FragmentCacheKey<A: IpAddress>(A, A, u32);
321
322impl<A: IpAddress> FragmentCacheKey<A> {
323    pub(crate) fn new(src_ip: A, dst_ip: A, fragment_id: u32) -> Self {
324        FragmentCacheKey(src_ip, dst_ip, fragment_id)
325    }
326}
327
/// An inclusive-inclusive range of bytes within a reassembled packet.
// NOTE: We use this instead of `std::ops::RangeInclusive` because the latter
// provides getter methods which return references, and it adds a lot of
// unnecessary dereferences.
//
// The derived `Ord`/`PartialOrd` compare by `start` first, then `end`
// (declaration order); `FragmentCacheData::find_gap` relies on this ordering
// for its `BTreeSet` range queries.
#[derive(Copy, Clone, Debug, Eq, PartialEq, PartialOrd, Ord)]
struct BlockRange {
    // First fragment block covered by the range (inclusive).
    start: u16,
    // Last fragment block covered by the range (inclusive).
    end: u16,
}
337
/// Data required for fragmented packet reassembly.
#[derive(Debug)]
struct FragmentCacheData {
    /// List of non-overlapping inclusive ranges of fragment blocks required
    /// before being ready to reassemble a packet.
    ///
    /// When creating a new instance of `FragmentCacheData`, we will set
    /// `missing_blocks` to a list with a single element representing all
    /// blocks, (0, MAX_VALUE). In this case, MAX_VALUE will be set to
    /// `u16::MAX`.
    missing_blocks: BTreeSet<BlockRange>,

    /// Received fragment blocks.
    ///
    /// We use a binary heap for help when reassembling packets. When we
    /// reassemble packets, we will want to fill up a new buffer with all the
    /// body fragments. The easiest way to do this is in order, from the
    /// fragment with offset 0 to the fragment with the highest offset. Since we
    /// only need to enforce the order when reassembling, we use a min-heap so
    /// we have a defined order (increasing fragment offset values) when
    /// popping. `BinaryHeap` is technically a max-heap, but we use the negative
    /// of the offset values as the key for the heap. See
    /// [`PacketBodyFragment::new`].
    body_fragments: BinaryHeap<PacketBodyFragment>,

    /// The header data for the reassembled packet.
    ///
    /// The header of the fragment packet with offset 0 will be used as the
    /// header for the final, reassembled packet.
    header: Option<Vec<u8>>,

    /// Total number of bytes in the reassembled packet.
    ///
    /// This is used so that we don't have to iterate through `body_fragments`
    /// and sum the partial body sizes to calculate the reassembled packet's
    /// size.
    total_size: usize,
}
376
377impl Default for FragmentCacheData {
378    fn default() -> FragmentCacheData {
379        FragmentCacheData {
380            missing_blocks: core::iter::once(BlockRange { start: 0, end: u16::MAX }).collect(),
381            body_fragments: BinaryHeap::new(),
382            header: None,
383            total_size: 0,
384        }
385    }
386}
387
388impl FragmentCacheData {
389    /// Attempts to find a gap where `fragment_blocks_range` will fit in.
390    ///
391    /// Returns `Some(o)` if a valid gap is found where `o` is the gap's offset
392    /// range; otherwise, returns `None`. `fragment_blocks_range` is an
393    /// inclusive range of fragment block offsets.
394    fn find_gap(&self, BlockRange { start, end }: BlockRange) -> Option<BlockRange> {
395        use core::ops::Bound::{Included, Unbounded};
396
397        // Find a gap that starts earlier or at the same point as a fragment.
398        let possible_free_place =
399            self.missing_blocks.range((Unbounded, Included(BlockRange { start, end: u16::MAX })));
400
401        // Make sure that `fragment` belongs purely within
402        // `potential_gap`.
403        //
404        // If `fragment` does not fit purely within
405        // `potential_gap`, then at least one block in
406        // `fragment` overlaps with an already received block.
407        // We should never receive overlapping fragments from non-malicious
408        // nodes.
409        possible_free_place
410            .last()
411            .filter(|&range| {
412                // range.start <= start must be always true here - so comparing only ending part
413                return end <= range.end;
414            })
415            .copied()
416    }
417}
418
/// A cache of inbound IP packet fragments.
#[derive(Debug)]
pub struct IpPacketFragmentCache<I: Ip, BT: FragmentBindingsTypes> {
    // In-progress reassemblies, keyed by (src, dst, fragment id).
    cache: HashMap<FragmentCacheKey<I::Addr>, FragmentCacheData>,
    // Total bytes currently cached across all entries.
    size: usize,
    // Cache-size limit; set to `MAX_FRAGMENT_CACHE_SIZE` at construction.
    threshold: usize,
    // Per-entry reassembly timeout timers.
    timers: LocalTimerHeap<FragmentCacheKey<I::Addr>, (), BT>,
}
427
428impl<I: Ip, BC: FragmentBindingsContext> IpPacketFragmentCache<I, BC> {
429    /// Creates a new `IpFragmentCache`.
430    pub fn new<CC: CoreTimerContext<FragmentTimerId<I>, BC>>(
431        bindings_ctx: &mut BC,
432    ) -> IpPacketFragmentCache<I, BC> {
433        IpPacketFragmentCache {
434            cache: HashMap::new(),
435            size: 0,
436            threshold: MAX_FRAGMENT_CACHE_SIZE,
437            timers: LocalTimerHeap::new(bindings_ctx, CC::convert_timer(Default::default())),
438        }
439    }
440}
441
/// Timer bookkeeping the cache asks its caller to perform, since the cache
/// itself has no access to a timer context.
enum CacheTimerAction<A: IpAddress> {
    /// Schedule a new reassembly-timeout timer for this key.
    CreateNewTimer(FragmentCacheKey<A>),
    /// Cancel the already-scheduled timer for this key.
    CancelExistingTimer(FragmentCacheKey<A>),
}
446
447impl<I: IpExt, BT: FragmentBindingsTypes> IpPacketFragmentCache<I, BT> {
448    /// Attempts to process a packet fragment.
449    ///
450    /// # Panics
451    ///
452    /// Panics if the packet has no fragment data.
453    fn process_fragment<B: SplitByteSlice>(
454        &mut self,
455        packet: I::Packet<B>,
456    ) -> (FragmentProcessingState<I, B>, Option<CacheTimerAction<I::Addr>>)
457    where
458        I::Packet<B>: FragmentablePacket,
459    {
460        if self.above_size_threshold() {
461            return (FragmentProcessingState::OutOfMemory, None);
462        }
463
464        // Get the fragment data.
465        let (id, offset, m_flag) = packet.fragment_data();
466
467        // Check if `packet` is actually fragmented. We know it is not
468        // fragmented if the fragment offset is 0 (contains first fragment) and
469        // we have no more fragments. This means the first fragment is the only
470        // fragment, implying we have a full packet.
471        if offset == 0 && !m_flag {
472            return (FragmentProcessingState::NotNeeded(packet), None);
473        }
474
475        // Make sure packet's body isn't empty. Since at this point we know that
476        // the packet is definitely fragmented (`offset` is not 0 or `m_flag` is
477        // `true`), we simply let the caller know we need more fragments. This
478        // should never happen, but just in case :).
479        if packet.body().is_empty() {
480            return (FragmentProcessingState::NeedMoreFragments, None);
481        }
482
483        // Make sure body is a multiple of `FRAGMENT_BLOCK_SIZE` bytes, or
484        // `packet` contains the last fragment block which is allowed to be less
485        // than `FRAGMENT_BLOCK_SIZE` bytes.
486        if m_flag && (packet.body().len() % (FRAGMENT_BLOCK_SIZE as usize) != 0) {
487            return (FragmentProcessingState::InvalidFragment, None);
488        }
489
490        // Key used to find this connection's fragment cache data.
491        let key = FragmentCacheKey::new(packet.src_ip(), packet.dst_ip(), id);
492
493        // The number of fragment blocks `packet` contains.
494        //
495        // Note, we are calculating the ceiling of an integer division.
496        // Essentially:
497        //     ceil(packet.body.len() / FRAGMENT_BLOCK_SIZE)
498        //
499        // We need to calculate the ceiling of the division because the final
500        // fragment block for a reassembled packet is allowed to contain less
501        // than `FRAGMENT_BLOCK_SIZE` bytes.
502        //
503        // We know `packet.body().len() - 1` will never be less than 0 because
504        // we already made sure that `packet`'s body is not empty, and it is
505        // impossible to have a negative body size.
506        let num_fragment_blocks = 1 + ((packet.body().len() - 1) / (FRAGMENT_BLOCK_SIZE as usize));
507        assert!(num_fragment_blocks > 0);
508
509        // The range of fragment blocks `packet` contains.
510        //
511        // The maximum number of fragment blocks a reassembled packet is allowed
512        // to contain is `MAX_FRAGMENT_BLOCKS` so we make sure that the fragment
513        // we received does not violate this.
514        let fragment_blocks_range =
515            if let Ok(offset_end) = u16::try_from((offset as usize) + num_fragment_blocks - 1) {
516                if offset_end <= MAX_FRAGMENT_BLOCKS {
517                    BlockRange { start: offset, end: offset_end }
518                } else {
519                    return (FragmentProcessingState::InvalidFragment, None);
520                }
521            } else {
522                return (FragmentProcessingState::InvalidFragment, None);
523            };
524
525        // Get (or create) the fragment cache data.
526        let (fragment_data, timer_not_yet_scheduled) = self.get_or_create(key);
527
528        // Find the gap where `packet` belongs.
529        let found_gap = match fragment_data.find_gap(fragment_blocks_range) {
530            // We did not find a potential gap `packet` fits in so some of the
531            // fragment blocks in `packet` overlaps with fragment blocks we
532            // already received.
533            None => {
534                // Drop all reassembly data as per RFC 8200 section 4.5 (IPv6).
535                // See RFC 5722 for more information.
536                //
537                // IPv4 (RFC 791) does not specify what to do for overlapped
538                // fragments. RFC 1858 section 4.2 outlines a way to prevent an
539                // overlapping fragment attack for IPv4, but this is primarily
540                // for IP filtering since "no standard requires that an
541                // overlap-safe reassemble algorithm be used" on hosts. In
542                // practice, non-malicious nodes should not intentionally send
543                // data for the same fragment block multiple times, so we will
544                // do the same thing as IPv6 in this case.
545                //
546                // TODO(ghanan): Check to see if the fragment block's data is
547                //               identical to already received data before
548                //               dropping the reassembly data as packets may be
549                //               duplicated in the network. Duplicate packets
550                //               which are also fragmented are probably rare, so
551                //               we should first determine if it is even
552                //               worthwhile to do this check first. Note, we can
553                //               choose to simply not do this check as RFC 8200
554                //               section 4.5 mentions an implementation *may
555                //               choose* to do this check. It does not say we
556                //               MUST, so we would not be violating the RFC if
557                //               we don't check for this case and just drop the
558                //               packet.
559                assert_matches!(self.remove_data(&key), Some(_));
560
561                return (
562                    FragmentProcessingState::InvalidFragment,
563                    (!timer_not_yet_scheduled)
564                        .then_some(CacheTimerAction::CancelExistingTimer(key)),
565                );
566            }
567            Some(f) => f,
568        };
569
570        let timer_id = timer_not_yet_scheduled.then_some(CacheTimerAction::CreateNewTimer(key));
571
572        // Remove `found_gap` since the gap as it exists will no longer be
573        // valid.
574        assert!(fragment_data.missing_blocks.remove(&found_gap));
575
576        // If the received fragment blocks start after the beginning of
577        // `found_gap`, create a new gap between the beginning of `found_gap`
578        // and the first fragment block contained in `packet`.
579        //
580        // Example:
581        //   `packet` w/ fragments [4, 7]
582        //                 |-----|-----|-----|-----|
583        //                    4     5     6     7
584        //
585        //   `found_gap` w/ fragments [X, 7] where 0 <= X < 4
586        //     |-----| ... |-----|-----|-----|-----|
587        //        X    ...    4     5     6     7
588        //
589        //   Here we can see that with a `found_gap` of [2, 7], `packet` covers
590        //   [4, 7] but we are still missing [X, 3] so we create a new gap of
591        //   [X, 3].
592        if found_gap.start < fragment_blocks_range.start {
593            assert!(fragment_data
594                .missing_blocks
595                .insert(BlockRange { start: found_gap.start, end: fragment_blocks_range.end - 1 }));
596        }
597
598        // If the received fragment blocks end before the end of `found_gap` and
599        // we expect more fragments, create a new gap between the last fragment
600        // block contained in `packet` and the end of `found_gap`.
601        //
602        // Example 1:
603        //   `packet` w/ fragments [4, 7] & m_flag = true
604        //     |-----|-----|-----|-----|
605        //        4     5     6     7
606        //
607        //   `found_gap` w/ fragments [4, Y] where 7 < Y <= `MAX_FRAGMENT_BLOCKS`.
608        //     |-----|-----|-----|-----| ... |-----|
609        //        4     5     6     7    ...    Y
610        //
611        //   Here we can see that with a `found_gap` of [4, Y], `packet` covers
612        //   [4, 7] but we still expect more fragment blocks after the blocks in
613        //   `packet` (as noted by `m_flag`) so we are still missing [8, Y] so
614        //   we create a new gap of [8, Y].
615        //
616        // Example 2:
617        //   `packet` w/ fragments [4, 7] & m_flag = false
618        //     |-----|-----|-----|-----|
619        //        4     5     6     7
620        //
621        //   `found_gap` w/ fragments [4, Y] where MAX = `MAX_FRAGMENT_BLOCKS`.
622        //     |-----|-----|-----|-----| ... |-----|
623        //        4     5     6     7    ...   MAX
624        //
625        //   Here we can see that with a `found_gap` of [4, MAX], `packet`
626        //   covers [4, 7] and we don't expect more fragment blocks after the
627        //   blocks in `packet` (as noted by `m_flag`) so we don't create a new
628        //   gap. Note, if we encounter a `packet` where `m_flag` is false,
629        //   `found_gap`'s end value must be MAX because we should only ever not
630        //   create a new gap where the end is MAX when we are processing a
631        //   packet with the last fragment block.
632        if found_gap.end > fragment_blocks_range.end && m_flag {
633            assert!(fragment_data
634                .missing_blocks
635                .insert(BlockRange { start: fragment_blocks_range.end + 1, end: found_gap.end }));
636        } else if found_gap.end > fragment_blocks_range.end && !m_flag && found_gap.end < u16::MAX {
637            // There is another fragment after this one that is already present
638            // in the cache. That means that this fragment can't be the last
639            // one (must have `m_flag` set).
640            return (FragmentProcessingState::InvalidFragment, timer_id);
641        } else {
642            // Make sure that if we are not adding a fragment after the packet,
643            // it is because `packet` goes up to the `found_gap`'s end boundary,
644            // or this is the last fragment. If it is the last fragment for a
645            // packet, we make sure that `found_gap`'s end value is
646            // `core::u16::MAX`.
647            assert!(
648                found_gap.end == fragment_blocks_range.end
649                    || (!m_flag && found_gap.end == u16::MAX),
650                "found_gap: {:?}, fragment_blocks_range: {:?} offset: {:?}, m_flag: {:?}",
651                found_gap,
652                fragment_blocks_range,
653                offset,
654                m_flag
655            );
656        }
657
658        let mut added_bytes = 0;
659        // Get header buffer from `packet` if its fragment offset equals to 0.
660        if offset == 0 {
661            assert_eq!(fragment_data.header, None);
662            let header = get_header::<B, I>(&packet);
663            added_bytes = header.len();
664            fragment_data.header = Some(header);
665        }
666
667        // Add our `packet`'s body to the store of body fragments.
668        let mut body = Vec::with_capacity(packet.body().len());
669        body.extend_from_slice(packet.body());
670        added_bytes += body.len();
671        fragment_data.total_size += added_bytes;
672        fragment_data.body_fragments.push(PacketBodyFragment::new(offset, body));
673
674        // If we still have missing fragments, let the caller know that we are
675        // still waiting on some fragments. Otherwise, we let them know we are
676        // ready to reassemble and give them a key and the final packet length
677        // so they can allocate a sufficient buffer and call
678        // `reassemble_packet`.
679        let result = if fragment_data.missing_blocks.is_empty() {
680            FragmentProcessingState::Ready { key, packet_len: fragment_data.total_size }
681        } else {
682            FragmentProcessingState::NeedMoreFragments
683        };
684
685        self.increment_size(added_bytes);
686        (result, timer_id)
687    }
688
689    /// Attempts to reassemble a packet.
690    ///
691    /// Attempts to reassemble a packet associated with a given
692    /// `FragmentCacheKey`, `key`, and cancels the timer to reset reassembly
693    /// data. The caller is expected to allocate a buffer of sufficient size
694    /// (available from `process_fragment` when it returns a
695    /// `FragmentProcessingState::Ready` value) and provide it to
696    /// `reassemble_packet` as `buffer` where the packet will be reassembled
697    /// into.
698    ///
699    /// # Panics
700    ///
701    /// Panics if the provided `buffer` does not have enough capacity for the
702    /// reassembled packet. Also panics if a different `ctx` is passed to
703    /// `reassemble_packet` from the one passed to `process_fragment` when
704    /// processing a packet with a given `key` as `reassemble_packet` will fail
705    /// to cancel the reassembly timer.
706    fn reassemble_packet<B: SplitByteSliceMut, BV: BufferViewMut<B>>(
707        &mut self,
708        key: &FragmentCacheKey<I::Addr>,
709        buffer: BV,
710    ) -> Result<(), FragmentReassemblyError> {
711        let entry = match self.cache.entry(*key) {
712            Entry::Occupied(entry) => entry,
713            Entry::Vacant(_) => return Err(FragmentReassemblyError::InvalidKey),
714        };
715
716        // Make sure we are not missing fragments.
717        if !entry.get().missing_blocks.is_empty() {
718            return Err(FragmentReassemblyError::MissingFragments);
719        }
720        // Remove the entry from the cache now that we've validated that we will
721        // be able to reassemble it.
722        let (_key, data) = entry.remove_entry();
723        self.size -= data.total_size;
724
725        // If we are not missing fragments, we must have header data.
726        assert_matches!(data.header, Some(_));
727
728        // TODO(https://github.com/rust-lang/rust/issues/59278): Use
729        // `BinaryHeap::into_iter_sorted`.
730        let body_fragments = data.body_fragments.into_sorted_vec().into_iter().map(|x| x.data);
731        I::Packet::reassemble_fragmented_packet(buffer, data.header.unwrap(), body_fragments)
732            .map_err(|_| FragmentReassemblyError::PacketParsingError)
733    }
734
735    /// Gets or creates a new entry in the cache for a given `key`.
736    ///
737    /// Returns a tuple whose second component indicates whether a reassembly
738    /// timer needs to be scheduled.
739    fn get_or_create(&mut self, key: FragmentCacheKey<I::Addr>) -> (&mut FragmentCacheData, bool) {
740        match self.cache.entry(key) {
741            Entry::Occupied(e) => (e.into_mut(), false),
742            Entry::Vacant(e) => {
743                // We have no reassembly data yet so this fragment is the first
744                // one associated with the given `key`. Create a new entry in
745                // the hash table and let the caller know to schedule a timer to
746                // reset the entry.
747                (e.insert(FragmentCacheData::default()), true)
748            }
749        }
750    }
751
    /// Returns whether the cache's byte accounting has reached or exceeded
    /// the configured threshold, i.e. whether no more fragments may be added.
    fn above_size_threshold(&self) -> bool {
        self.size >= self.threshold
    }
755
    /// Adds `sz` bytes to the cache's size accounting.
    ///
    /// The assertion enforces the invariant that new bytes are only ever
    /// added while the cache is still below the threshold; callers are
    /// expected to have checked `above_size_threshold` first.
    fn increment_size(&mut self, sz: usize) {
        assert!(!self.above_size_threshold());
        self.size += sz;
    }
760
761    fn remove_data(&mut self, key: &FragmentCacheKey<I::Addr>) -> Option<FragmentCacheData> {
762        let data = self.cache.remove(key)?;
763        self.size -= data.total_size;
764        Some(data)
765    }
766}
767
/// Gets the header bytes for a packet.
///
/// For IPv4 and IPv6 alike this copies the bytes that must be prepended to
/// the reassembled body; the match is needed only because the two packet
/// types are distinct even though both expose
/// `copy_header_bytes_for_fragment`.
fn get_header<B: SplitByteSlice, I: IpExt>(packet: &I::Packet<B>) -> Vec<u8> {
    match packet.as_ip_addr_ref() {
        IpAddr::V4(packet) => packet.copy_header_bytes_for_fragment(),
        IpAddr::V6(packet) => {
            // We are guaranteed not to panic here because we will only panic if
            // `packet` does not have a fragment extension header. We can only get
            // here if `packet` is a fragment packet, so we know that `packet` has a
            // fragment extension header.
            packet.copy_header_bytes_for_fragment()
        }
    }
}
781
/// A fragment of a packet's body.
#[derive(Debug, PartialEq, Eq)]
struct PacketBodyFragment {
    // The fragment offset of this piece within the original packet's body;
    // used (exclusively) to order fragments during reassembly.
    offset: u16,
    // The raw bytes of this fragment's body.
    data: Vec<u8>,
}
788
789impl PacketBodyFragment {
790    /// Constructs a new `PacketBodyFragment` to be stored in a `BinaryHeap`.
791    fn new(offset: u16, data: Vec<u8>) -> Self {
792        PacketBodyFragment { offset, data }
793    }
794}
795
// The ordering of a `PacketBodyFragment` is only dependent on the fragment
// offset.
impl PartialOrd for PacketBodyFragment {
    // Delegate to the total order defined by `Ord`, as is canonical when both
    // traits are implemented.
    fn partial_cmp(&self, other: &PacketBodyFragment) -> Option<Ordering> {
        Some(self.cmp(other))
    }
}
803
impl Ord for PacketBodyFragment {
    // Compare solely by fragment offset; `data` is intentionally ignored so
    // that a `BinaryHeap` of fragments sorts by position in the packet.
    fn cmp(&self, other: &Self) -> Ordering {
        self.offset.cmp(&other.offset)
    }
}
809
810#[cfg(test)]
811mod tests {
812    use alloc::vec;
813
814    use assert_matches::assert_matches;
815    use ip_test_macro::ip_test;
816    use net_types::ip::{Ipv4, Ipv6};
817    use net_types::Witness;
818    use netstack3_base::testutil::{
819        assert_empty, FakeBindingsCtx, FakeCoreCtx, FakeInstant, FakeTimerCtxExt, TestAddrs,
820        TEST_ADDRS_V4, TEST_ADDRS_V6,
821    };
822    use netstack3_base::{CtxPair, IntoCoreTimerCtx};
823    use packet::{Buf, ParsablePacket, ParseBuffer, Serializer};
824    use packet_formats::ip::{FragmentOffset, IpProto, Ipv6ExtHdrType};
825    use packet_formats::ipv4::Ipv4PacketBuilder;
826    use packet_formats::ipv6::Ipv6PacketBuilder;
827
828    use super::*;
829
    /// Fake core-context state holding just the fragment cache under test.
    struct FakeFragmentContext<I: Ip, BT: FragmentBindingsTypes> {
        cache: IpPacketFragmentCache<I, BT>,
    }
833
    impl<I: Ip, BC: FragmentBindingsContext> FakeFragmentContext<I, BC>
    where
        BC::DispatchId: From<FragmentTimerId<I>>,
    {
        /// Creates a fake context wrapping a freshly-constructed fragment
        /// cache bound to `bindings_ctx`'s timers.
        fn new(bindings_ctx: &mut BC) -> Self {
            Self { cache: IpPacketFragmentCache::new::<IntoCoreTimerCtx>(bindings_ctx) }
        }
    }
842
    // Convenience aliases for the fake core/bindings context pair used
    // throughout these tests.
    type FakeCtxImpl<I> = CtxPair<FakeCoreCtxImpl<I>, FakeBindingsCtxImpl<I>>;
    type FakeBindingsCtxImpl<I> = FakeBindingsCtx<FragmentTimerId<I>, (), (), ()>;
    type FakeCoreCtxImpl<I> = FakeCoreCtx<FakeFragmentContext<I, FakeBindingsCtxImpl<I>>, (), ()>;
846
    impl<I: Ip> FragmentContext<I, FakeBindingsCtxImpl<I>> for FakeCoreCtxImpl<I> {
        // Satisfy the `FragmentContext` trait by running the callback
        // directly against the fake state's fragment cache.
        fn with_state_mut<
            O,
            F: FnOnce(&mut IpPacketFragmentCache<I, FakeBindingsCtxImpl<I>>) -> O,
        >(
            &mut self,
            cb: F,
        ) -> O {
            cb(&mut self.state.cache)
        }
    }
858
    /// Asserts that `$lhs` evaluates to `FragmentProcessingState::Ready` with
    /// the key built from `$src_ip`, `$dst_ip` and `$fragment_id`, and with
    /// packet length `$packet_len`. On success the macro evaluates to the
    /// `(key, packet_len)` pair; on mismatch it panics.
    macro_rules! assert_frag_proc_state_ready {
        ($lhs:expr, $src_ip:expr, $dst_ip:expr, $fragment_id:expr, $packet_len:expr) => {{
            let lhs_val = $lhs;
            match lhs_val {
                FragmentProcessingState::Ready { key, packet_len } => {
                    if key == FragmentCacheKey::new($src_ip, $dst_ip, $fragment_id as u32)
                        && packet_len == $packet_len
                    {
                        (key, packet_len)
                    } else {
                        panic!("Invalid key or packet_len values");
                    }
                }
                _ => panic!("{:?} is not `Ready`", lhs_val),
            }
        }};
    }
876
    /// The result `process_ipv4_fragment` or `process_ipv6_fragment` should
    /// expect after processing a fragment.
    #[derive(PartialEq)]
    enum ExpectedResult {
        /// After processing a packet fragment, we should be ready to reassemble
        /// the packet. `total_body_len` is the expected length of the
        /// reassembled body, excluding the IP header.
        Ready { total_body_len: usize },

        /// After processing a packet fragment, we need more packet fragments
        /// before being ready to reassemble the packet.
        NeedMore,

        /// The packet fragment is invalid.
        Invalid,

        /// The Cache is full.
        OutOfMemory,
    }
895
896    /// Get an IPv4 packet builder.
897    fn get_ipv4_builder() -> Ipv4PacketBuilder {
898        Ipv4PacketBuilder::new(
899            TEST_ADDRS_V4.remote_ip,
900            TEST_ADDRS_V4.local_ip,
901            10,
902            IpProto::Tcp.into(),
903        )
904    }
905
906    /// Get an IPv6 packet builder.
907    fn get_ipv6_builder() -> Ipv6PacketBuilder {
908        Ipv6PacketBuilder::new(
909            TEST_ADDRS_V6.remote_ip,
910            TEST_ADDRS_V6.local_ip,
911            10,
912            IpProto::Tcp.into(),
913        )
914    }
915
916    /// Validate that IpPacketFragmentCache has correct size.
917    fn validate_size<I: Ip, BT: FragmentBindingsTypes>(cache: &IpPacketFragmentCache<I, BT>) {
918        let mut sz: usize = 0;
919
920        for v in cache.cache.values() {
921            sz += v.total_size;
922        }
923
924        assert_eq!(sz, cache.size);
925    }
926
    /// Processes an IP fragment depending on the `Ip` `process_ip_fragment` is
    /// specialized with.
    ///
    /// See [`process_ipv4_fragment`] and [`process_ipv6_fragment`] which will
    /// be called when `I` is `Ipv4` and `Ipv6`, respectively.
    fn process_ip_fragment<
        I: TestIpExt,
        CC: FragmentContext<I, BC>,
        BC: FragmentBindingsContext,
    >(
        core_ctx: &mut CC,
        bindings_ctx: &mut BC,
        fragment_id: u16,
        fragment_offset: u16,
        m_flag: bool,
        expected_result: ExpectedResult,
    ) {
        // Pure dispatch: the version-specific behavior lives in the
        // `TestIpExt` impls below.
        I::process_ip_fragment(
            core_ctx,
            bindings_ctx,
            fragment_id,
            fragment_offset,
            m_flag,
            expected_result,
        )
    }
953
    /// Generates and processes an IPv4 fragment packet.
    ///
    /// The generated packet will have body of size `FRAGMENT_BLOCK_SIZE` bytes.
    /// After processing, asserts that the cache's response matches
    /// `expected_result`.
    fn process_ipv4_fragment<CC: FragmentContext<Ipv4, BC>, BC: FragmentBindingsContext>(
        core_ctx: &mut CC,
        bindings_ctx: &mut BC,
        fragment_id: u16,
        fragment_offset: u16,
        m_flag: bool,
        expected_result: ExpectedResult,
    ) {
        // Build a fragment whose identification, offset, and more-fragments
        // flag come straight from the arguments.
        let mut builder = get_ipv4_builder();
        builder.id(fragment_id);
        builder.fragment_offset(FragmentOffset::new(fragment_offset).unwrap());
        builder.mf_flag(m_flag);
        let body =
            generate_body_fragment(fragment_id, fragment_offset, usize::from(FRAGMENT_BLOCK_SIZE));

        let mut buffer = Buf::new(body, ..).encapsulate(builder).serialize_vec_outer().unwrap();
        let packet = buffer.parse::<Ipv4Packet<_>>().unwrap();

        match expected_result {
            ExpectedResult::Ready { total_body_len } => {
                // `Ready` reports the final packet length, which includes the
                // IPv4 header captured from the offset-0 fragment.
                let _: (FragmentCacheKey<_>, usize) = assert_frag_proc_state_ready!(
                    FragmentHandler::process_fragment::<&[u8]>(core_ctx, bindings_ctx, packet),
                    TEST_ADDRS_V4.remote_ip.get(),
                    TEST_ADDRS_V4.local_ip.get(),
                    fragment_id,
                    total_body_len + Ipv4::HEADER_LENGTH
                );
            }
            ExpectedResult::NeedMore => {
                assert_matches!(
                    FragmentHandler::process_fragment::<&[u8]>(core_ctx, bindings_ctx, packet),
                    FragmentProcessingState::NeedMoreFragments
                );
            }
            ExpectedResult::Invalid => {
                assert_matches!(
                    FragmentHandler::process_fragment::<&[u8]>(core_ctx, bindings_ctx, packet),
                    FragmentProcessingState::InvalidFragment
                );
            }
            ExpectedResult::OutOfMemory => {
                assert_matches!(
                    FragmentHandler::process_fragment::<&[u8]>(core_ctx, bindings_ctx, packet),
                    FragmentProcessingState::OutOfMemory
                );
            }
        }
    }
1005
    /// Generates and processes an IPv6 fragment packet.
    ///
    /// The generated packet will have body of size `FRAGMENT_BLOCK_SIZE` bytes.
    /// After processing, asserts that the cache's response matches
    /// `expected_result`.
    fn process_ipv6_fragment<CC: FragmentContext<Ipv6, BC>, BC: FragmentBindingsContext>(
        core_ctx: &mut CC,
        bindings_ctx: &mut BC,
        fragment_id: u16,
        fragment_offset: u16,
        m_flag: bool,
        expected_result: ExpectedResult,
    ) {
        // Build a raw IPv6 packet by hand: a 40-byte fixed header followed by
        // an 8-byte fragment extension header.
        let mut bytes = vec![0; 48];
        // Version (6), traffic class, and flow label.
        bytes[..4].copy_from_slice(&[0x60, 0x20, 0x00, 0x77][..]);
        bytes[6] = Ipv6ExtHdrType::Fragment.into(); // Next Header
        bytes[7] = 64; // Hop limit.
        bytes[8..24].copy_from_slice(TEST_ADDRS_V6.remote_ip.bytes()); // Source address.
        bytes[24..40].copy_from_slice(TEST_ADDRS_V6.local_ip.bytes()); // Destination address.
        // Fragment extension header: next header, then the 13-bit fragment
        // offset with the M (more fragments) flag in the low bit, then the
        // 32-bit identification.
        bytes[40] = IpProto::Tcp.into();
        bytes[42] = (fragment_offset >> 5) as u8;
        bytes[43] = ((fragment_offset & 0x1F) << 3) as u8 | if m_flag { 1 } else { 0 };
        bytes[44..48].copy_from_slice(&(fragment_id as u32).to_be_bytes());
        bytes.extend(
            generate_body_fragment(fragment_id, fragment_offset, usize::from(FRAGMENT_BLOCK_SIZE))
                .iter(),
        );
        // Backfill the payload length now that the body size is known.
        let payload_len = (bytes.len() - Ipv6::HEADER_LENGTH) as u16;
        bytes[4..6].copy_from_slice(&payload_len.to_be_bytes());
        let mut buf = Buf::new(bytes, ..);
        let packet = buf.parse::<Ipv6Packet<_>>().unwrap();

        match expected_result {
            ExpectedResult::Ready { total_body_len } => {
                // `Ready` reports the final packet length, which includes the
                // IPv6 fixed header captured from the offset-0 fragment.
                let _: (FragmentCacheKey<_>, usize) = assert_frag_proc_state_ready!(
                    FragmentHandler::process_fragment::<&[u8]>(core_ctx, bindings_ctx, packet),
                    TEST_ADDRS_V6.remote_ip.get(),
                    TEST_ADDRS_V6.local_ip.get(),
                    fragment_id,
                    total_body_len + Ipv6::HEADER_LENGTH
                );
            }
            ExpectedResult::NeedMore => {
                assert_matches!(
                    FragmentHandler::process_fragment::<&[u8]>(core_ctx, bindings_ctx, packet),
                    FragmentProcessingState::NeedMoreFragments
                );
            }
            ExpectedResult::Invalid => {
                assert_matches!(
                    FragmentHandler::process_fragment::<&[u8]>(core_ctx, bindings_ctx, packet),
                    FragmentProcessingState::InvalidFragment
                );
            }
            ExpectedResult::OutOfMemory => {
                assert_matches!(
                    FragmentHandler::process_fragment::<&[u8]>(core_ctx, bindings_ctx, packet),
                    FragmentProcessingState::OutOfMemory
                );
            }
        }
    }
1066
    /// IP-version-specific test helpers layered on top of the base test
    /// extension trait.
    trait TestIpExt: netstack3_base::testutil::TestIpExt {
        /// The length of this IP version's fixed header, in bytes.
        const HEADER_LENGTH: usize;

        /// Generates and processes a fragment packet for this IP version,
        /// asserting that the cache responds with `expected_result`.
        fn process_ip_fragment<CC: FragmentContext<Self, BC>, BC: FragmentBindingsContext>(
            core_ctx: &mut CC,
            bindings_ctx: &mut BC,
            fragment_id: u16,
            fragment_offset: u16,
            m_flag: bool,
            expected_result: ExpectedResult,
        );
    }
1079
    impl TestIpExt for Ipv4 {
        const HEADER_LENGTH: usize = packet_formats::ipv4::HDR_PREFIX_LEN;

        // Delegates to the IPv4-specific packet generator.
        fn process_ip_fragment<CC: FragmentContext<Self, BC>, BC: FragmentBindingsContext>(
            core_ctx: &mut CC,
            bindings_ctx: &mut BC,
            fragment_id: u16,
            fragment_offset: u16,
            m_flag: bool,
            expected_result: ExpectedResult,
        ) {
            process_ipv4_fragment(
                core_ctx,
                bindings_ctx,
                fragment_id,
                fragment_offset,
                m_flag,
                expected_result,
            )
        }
    }
    impl TestIpExt for Ipv6 {
        const HEADER_LENGTH: usize = packet_formats::ipv6::IPV6_FIXED_HDR_LEN;

        // Delegates to the IPv6-specific packet generator.
        fn process_ip_fragment<CC: FragmentContext<Self, BC>, BC: FragmentBindingsContext>(
            core_ctx: &mut CC,
            bindings_ctx: &mut BC,
            fragment_id: u16,
            fragment_offset: u16,
            m_flag: bool,
            expected_result: ExpectedResult,
        ) {
            process_ipv6_fragment(
                core_ctx,
                bindings_ctx,
                fragment_id,
                fragment_offset,
                m_flag,
                expected_result,
            )
        }
    }
1122
    /// Tries to reassemble the packet with the given fragment ID.
    ///
    /// Allocates a buffer of `total_body_len` plus the IP header length,
    /// reassembles into it, and verifies that the reassembled body matches
    /// the bytes `generate_body_fragment` produces for the whole packet.
    fn try_reassemble_ip_packet<
        I: TestIpExt + netstack3_base::IpExt,
        CC: FragmentContext<I, BC>,
        BC: FragmentBindingsContext,
    >(
        core_ctx: &mut CC,
        bindings_ctx: &mut BC,
        fragment_id: u16,
        total_body_len: usize,
    ) {
        // Buffer sized for the reassembled header + body.
        let mut buffer: Vec<u8> = vec![0; total_body_len + I::HEADER_LENGTH];
        let mut buffer = &mut buffer[..];
        let key = FragmentCacheKey::new(
            I::TEST_ADDRS.remote_ip.get(),
            I::TEST_ADDRS.local_ip.get(),
            fragment_id.into(),
        );

        FragmentHandler::reassemble_packet(core_ctx, bindings_ctx, &key, &mut buffer).unwrap();
        let packet = I::Packet::parse_mut(&mut buffer, ()).unwrap();

        // The full body equals a single "fragment" at offset 0 spanning the
        // whole payload, since overlapping generated bytes are guaranteed to
        // agree.
        let expected_body = generate_body_fragment(fragment_id, 0, total_body_len);
        assert_eq!(packet.body(), &expected_body[..]);
    }
1148
1149    /// Generates the body of a packet with the given fragment ID, offset, and
1150    /// length.
1151    ///
1152    /// Overlapping body bytes from different calls to `generate_body_fragment`
1153    /// are guaranteed to have the same values.
1154    fn generate_body_fragment(fragment_id: u16, fragment_offset: u16, len: usize) -> Vec<u8> {
1155        // The body contains increasing byte values which start at `fragment_id`
1156        // at byte 0. This ensures that different packets with different
1157        // fragment IDs contain bodies with different byte values.
1158        let start = usize::from(fragment_id)
1159            + usize::from(fragment_offset) * usize::from(FRAGMENT_BLOCK_SIZE);
1160        (start..start + len).map(|byte| byte as u8).collect()
1161    }
1162
1163    /// Gets a `FragmentCacheKey` with the remote and local IP addresses hard
1164    /// coded to their test values.
1165    fn test_key<I: TestIpExt>(id: u32) -> FragmentCacheKey<I::Addr> {
1166        FragmentCacheKey::new(I::TEST_ADDRS.remote_ip.get(), I::TEST_ADDRS.local_ip.get(), id)
1167    }
1168
    /// Builds a fresh fake core/bindings context pair with an empty fragment
    /// cache for use in a test.
    fn new_context<I: Ip>() -> FakeCtxImpl<I> {
        FakeCtxImpl::<I>::with_default_bindings_ctx(|bindings_ctx| {
            FakeCoreCtxImpl::with_state(FakeFragmentContext::new(bindings_ctx))
        })
    }
1174
    #[test]
    fn test_ipv4_reassembly_not_needed() {
        let FakeCtxImpl { mut core_ctx, mut bindings_ctx } = new_context::<Ipv4>();

        // Test that we don't attempt reassembly if the packet is not
        // fragmented.

        // Default builder: no fragment offset set and no more-fragments flag.
        let builder = get_ipv4_builder();
        let body = [1, 2, 3, 4, 5];
        let mut buffer =
            Buf::new(body.to_vec(), ..).encapsulate(builder).serialize_vec_outer().unwrap();
        let packet = buffer.parse::<Ipv4Packet<_>>().unwrap();
        // The packet should be handed back untouched, body intact.
        assert_matches!(
            FragmentHandler::process_fragment::<&[u8]>(&mut core_ctx, &mut bindings_ctx, packet),
            FragmentProcessingState::NotNeeded(unfragmented) if unfragmented.body() == body
        );
    }
1192
    #[test]
    #[should_panic(
        expected = "internal error: entered unreachable code: Should never call this function if the packet does not have a fragment header"
    )]
    fn test_ipv6_reassembly_not_needed() {
        let FakeCtxImpl { mut core_ctx, mut bindings_ctx } = new_context::<Ipv6>();

        // Test that we panic if we call `fragment_data` on a packet that has no
        // fragment data.

        let builder = get_ipv6_builder();
        let mut buffer =
            Buf::new(vec![1, 2, 3, 4, 5], ..).encapsulate(builder).serialize_vec_outer().unwrap();
        let packet = buffer.parse::<Ipv6Packet<_>>().unwrap();
        // `process_fragment` is expected to panic (see `should_panic` above);
        // the assertion is never actually reached.
        assert_matches!(
            FragmentHandler::process_fragment::<&[u8]>(&mut core_ctx, &mut bindings_ctx, packet),
            FragmentProcessingState::InvalidFragment
        );
    }
1212
    #[ip_test(I)]
    fn test_ip_reassembly<I: TestIpExt + netstack3_base::IpExt>() {
        let FakeCtxImpl { mut core_ctx, mut bindings_ctx } = new_context::<I>();
        let fragment_id = 5;

        // Test that we properly reassemble fragmented packets.

        // Process fragment #0
        process_ip_fragment(
            &mut core_ctx,
            &mut bindings_ctx,
            fragment_id,
            0,
            true,
            ExpectedResult::NeedMore,
        );

        // Process fragment #1
        process_ip_fragment(
            &mut core_ctx,
            &mut bindings_ctx,
            fragment_id,
            2,
            false,
            ExpectedResult::Ready { total_body_len: 24 },
        );

        try_reassemble_ip_packet(&mut core_ctx, &mut bindings_ctx, fragment_id, 24);
    }
1252
    #[ip_test(I)]
    fn test_ip_reassemble_with_missing_blocks<I: TestIpExt + netstack3_base::IpExt>() {
        let fake_config = I::TEST_ADDRS;
        let FakeCtxImpl { mut core_ctx, mut bindings_ctx } = new_context::<I>();
        let fragment_id = 5;

        // Test the error we get when we attempt to reassemble with missing
        // fragments.

        // Process fragment #0
        process_ip_fragment(
            &mut core_ctx,
            &mut bindings_ctx,
            fragment_id,
            0,
            true,
            ExpectedResult::NeedMore,
        );

        // Process fragment #1 (the final fragment, at offset 2 or beyond, is
        // never processed, so reassembly can never complete).
        process_ip_fragment(
            &mut core_ctx,
            &mut bindings_ctx,
            fragment_id,
            1,
            true,
            ExpectedResult::NeedMore,
        );

        // The buffer is intentionally tiny: reassembly must fail with
        // `MissingFragments` before the buffer is ever written to.
        let mut buffer: Vec<u8> = vec![0; 1];
        let mut buffer = &mut buffer[..];
        let key = FragmentCacheKey::new(
            fake_config.remote_ip.get(),
            fake_config.local_ip.get(),
            fragment_id as u32,
        );
        assert_eq!(
            FragmentHandler::reassemble_packet(&mut core_ctx, &mut bindings_ctx, &key, &mut buffer)
                .unwrap_err(),
            FragmentReassemblyError::MissingFragments,
        );
    }
1295
1296    #[ip_test(I)]
1297    fn test_ip_reassemble_after_timer<I: TestIpExt + netstack3_base::IpExt>() {
1298        let fake_config = I::TEST_ADDRS;
1299        let FakeCtxImpl { mut core_ctx, mut bindings_ctx } = new_context::<I>();
1300        let fragment_id = 5;
1301        let key = test_key::<I>(fragment_id.into());
1302
1303        // Make sure no timers in the dispatcher yet.
1304        bindings_ctx.timers.assert_no_timers_installed();
1305        assert_eq!(core_ctx.state.cache.size, 0);
1306
1307        // Test that we properly reset fragment cache on timer.
1308
1309        // Process fragment #0
1310        process_ip_fragment(
1311            &mut core_ctx,
1312            &mut bindings_ctx,
1313            fragment_id,
1314            0,
1315            true,
1316            ExpectedResult::NeedMore,
1317        );
1318
1319        // Make sure a timer got added.
1320        core_ctx.state.cache.timers.assert_timers([(
1321            key,
1322            (),
1323            FakeInstant::from(REASSEMBLY_TIMEOUT),
1324        )]);
1325        validate_size(&core_ctx.state.cache);
1326
1327        // Process fragment #1
1328        process_ip_fragment(
1329            &mut core_ctx,
1330            &mut bindings_ctx,
1331            fragment_id,
1332            1,
1333            true,
1334            ExpectedResult::NeedMore,
1335        );
1336        // Make sure no new timers got added or fired.
1337        core_ctx.state.cache.timers.assert_timers([(
1338            key,
1339            (),
1340            FakeInstant::from(REASSEMBLY_TIMEOUT),
1341        )]);
1342        validate_size(&core_ctx.state.cache);
1343
1344        // Process fragment #2
1345        process_ip_fragment(
1346            &mut core_ctx,
1347            &mut bindings_ctx,
1348            fragment_id,
1349            2,
1350            false,
1351            ExpectedResult::Ready { total_body_len: 24 },
1352        );
1353        // Make sure no new timers got added or fired.
1354        core_ctx.state.cache.timers.assert_timers([(
1355            key,
1356            (),
1357            FakeInstant::from(REASSEMBLY_TIMEOUT),
1358        )]);
1359        validate_size(&core_ctx.state.cache);
1360
1361        // Trigger the timer (simulate a timer for the fragmented packet).
1362        assert_eq!(
1363            bindings_ctx.trigger_next_timer(&mut core_ctx),
1364            Some(FragmentTimerId::<I>::default())
1365        );
1366
1367        // Make sure no other times exist..
1368        bindings_ctx.timers.assert_no_timers_installed();
1369        assert_eq!(core_ctx.state.cache.size, 0);
1370
1371        // Attempt to reassemble the packet but get an error since the fragment
1372        // data would have been reset/cleared.
1373        let key = FragmentCacheKey::new(
1374            fake_config.local_ip.get(),
1375            fake_config.remote_ip.get(),
1376            fragment_id as u32,
1377        );
1378        let packet_len = 44;
1379        let mut buffer: Vec<u8> = vec![0; packet_len];
1380        let mut buffer = &mut buffer[..];
1381        assert_eq!(
1382            FragmentHandler::reassemble_packet(&mut core_ctx, &mut bindings_ctx, &key, &mut buffer)
1383                .unwrap_err(),
1384            FragmentReassemblyError::InvalidKey,
1385        );
1386    }
1387
    #[ip_test(I)]
    fn test_ip_fragment_cache_oom<I: TestIpExt + netstack3_base::IpExt>() {
        let FakeCtxImpl { mut core_ctx, mut bindings_ctx } = new_context::<I>();
        let mut fragment_id = 0;
        const THRESHOLD: usize = 8196usize;

        assert_eq!(core_ctx.state.cache.size, 0);
        core_ctx.state.cache.threshold = THRESHOLD;

        // Test that when cache size exceeds the threshold, process_fragment
        // returns OOM.

        // Fill the cache with offset-0 fragments of distinct packets until
        // the accounted size reaches the threshold.
        while core_ctx.state.cache.size < THRESHOLD {
            process_ip_fragment(
                &mut core_ctx,
                &mut bindings_ctx,
                fragment_id,
                0,
                true,
                ExpectedResult::NeedMore,
            );
            validate_size(&core_ctx.state.cache);
            fragment_id += 1;
        }

        // Now that the cache is at or above the threshold, observe OOM.
        process_ip_fragment(
            &mut core_ctx,
            &mut bindings_ctx,
            fragment_id,
            0,
            true,
            ExpectedResult::OutOfMemory,
        );
        validate_size(&core_ctx.state.cache);

        // Trigger the timers, which will clear the cache.
        let timers = bindings_ctx
            .trigger_timers_for(REASSEMBLY_TIMEOUT + Duration::from_secs(1), &mut core_ctx)
            .len();
        // One timer per cached packet; the count differs between IP versions
        // because each fragment contributes a different number of bytes
        // (header size differs), so a different number of packets is needed
        // to reach the threshold.
        assert!(timers == 171 || timers == 293, "timers is {timers}"); // ipv4 || ipv6
        assert_eq!(core_ctx.state.cache.size, 0);
        validate_size(&core_ctx.state.cache);

        // Can process fragments again.
        process_ip_fragment(
            &mut core_ctx,
            &mut bindings_ctx,
            fragment_id,
            0,
            true,
            ExpectedResult::NeedMore,
        );
    }
1442
    #[ip_test(I)]
    fn test_unordered_fragments<I: TestIpExt>() {
        let FakeCtxImpl { mut core_ctx, mut bindings_ctx } = new_context::<I>();
        let fragment_id = 5;

        // Test that we correctly reassemble packets whose fragments arrive
        // out of order.

        // Process fragment #0
        process_ip_fragment(
            &mut core_ctx,
            &mut bindings_ctx,
            fragment_id,
            0,
            true,
            ExpectedResult::NeedMore,
        );

        // Process fragment #2 (the final fragment) before fragment #1.
        process_ip_fragment(
            &mut core_ctx,
            &mut bindings_ctx,
            fragment_id,
            2,
            false,
            ExpectedResult::NeedMore,
        );

        // Process fragment #1, filling the gap and completing the packet.
        process_ip_fragment(
            &mut core_ctx,
            &mut bindings_ctx,
            fragment_id,
            1,
            true,
            ExpectedResult::Ready { total_body_len: 24 },
        );
    }
1480
    #[ip_test(I)]
    fn test_ip_overlapping_single_fragment<I: TestIpExt>() {
        let FakeCtxImpl { mut core_ctx, mut bindings_ctx } = new_context::<I>();
        let fragment_id = 5;

        // Test that we error on overlapping/duplicate fragments.

        // Process fragment #0
        process_ip_fragment(
            &mut core_ctx,
            &mut bindings_ctx,
            fragment_id,
            0,
            true,
            ExpectedResult::NeedMore,
        );

        // Process fragment #0 again (overlaps original fragment #0
        // completely); a duplicate block is rejected as invalid.
        process_ip_fragment(
            &mut core_ctx,
            &mut bindings_ctx,
            fragment_id,
            0,
            true,
            ExpectedResult::Invalid,
        );
    }
1508
1509    #[test]
1510    fn test_ipv4_fragment_not_multiple_of_offset_unit() {
1511        let FakeCtxImpl { mut core_ctx, mut bindings_ctx } = new_context::<Ipv4>();
1512        let fragment_id = 0;
1513
1514        assert_eq!(core_ctx.state.cache.size, 0);
1515        // Test that fragment bodies must be a multiple of
1516        // `FRAGMENT_BLOCK_SIZE`, except for the last fragment.
1517
1518        // Process fragment #0
1519        process_ipv4_fragment(
1520            &mut core_ctx,
1521            &mut bindings_ctx,
1522            fragment_id,
1523            0,
1524            true,
1525            ExpectedResult::NeedMore,
1526        );
1527
1528        // Process fragment #1 (body size is not a multiple of
1529        // `FRAGMENT_BLOCK_SIZE` and more flag is `true`).
1530        let mut builder = get_ipv4_builder();
1531        builder.id(fragment_id);
1532        builder.fragment_offset(FragmentOffset::new(1).unwrap());
1533        builder.mf_flag(true);
1534        // Body with 1 byte less than `FRAGMENT_BLOCK_SIZE` so it is not a
1535        // multiple of `FRAGMENT_BLOCK_SIZE`.
1536        let mut body: Vec<u8> = Vec::new();
1537        body.extend(FRAGMENT_BLOCK_SIZE..FRAGMENT_BLOCK_SIZE * 2 - 1);
1538        let mut buffer = Buf::new(body, ..).encapsulate(builder).serialize_vec_outer().unwrap();
1539        let packet = buffer.parse::<Ipv4Packet<_>>().unwrap();
1540        assert_matches!(
1541            FragmentHandler::process_fragment::<&[u8]>(&mut core_ctx, &mut bindings_ctx, packet),
1542            FragmentProcessingState::InvalidFragment
1543        );
1544
1545        // Process fragment #1 (body size is not a multiple of
1546        // `FRAGMENT_BLOCK_SIZE` but more flag is `false`). The last fragment is
1547        // allowed to not be a multiple of `FRAGMENT_BLOCK_SIZE`.
1548        let mut builder = get_ipv4_builder();
1549        builder.id(fragment_id);
1550        builder.fragment_offset(FragmentOffset::new(1).unwrap());
1551        builder.mf_flag(false);
1552        // Body with 1 byte less than `FRAGMENT_BLOCK_SIZE` so it is not a
1553        // multiple of `FRAGMENT_BLOCK_SIZE`.
1554        let mut body: Vec<u8> = Vec::new();
1555        body.extend(FRAGMENT_BLOCK_SIZE..FRAGMENT_BLOCK_SIZE * 2 - 1);
1556        let mut buffer = Buf::new(body, ..).encapsulate(builder).serialize_vec_outer().unwrap();
1557        let packet = buffer.parse::<Ipv4Packet<_>>().unwrap();
1558        let (key, packet_len) = assert_frag_proc_state_ready!(
1559            FragmentHandler::process_fragment::<&[u8]>(&mut core_ctx, &mut bindings_ctx, packet),
1560            TEST_ADDRS_V4.remote_ip.get(),
1561            TEST_ADDRS_V4.local_ip.get(),
1562            fragment_id,
1563            35
1564        );
1565        validate_size(&core_ctx.state.cache);
1566        let mut buffer: Vec<u8> = vec![0; packet_len];
1567        let mut buffer = &mut buffer[..];
1568        FragmentHandler::reassemble_packet(&mut core_ctx, &mut bindings_ctx, &key, &mut buffer)
1569            .unwrap();
1570        let packet = Ipv4Packet::parse_mut(&mut buffer, ()).unwrap();
1571        let mut expected_body: Vec<u8> = Vec::new();
1572        expected_body.extend(0..15);
1573        assert_eq!(packet.body(), &expected_body[..]);
1574        assert_eq!(core_ctx.state.cache.size, 0);
1575    }
1576
    #[test]
    fn test_ipv6_fragment_not_multiple_of_offset_unit() {
        let FakeCtxImpl { mut core_ctx, mut bindings_ctx } = new_context::<Ipv6>();
        let fragment_id = 0;

        assert_eq!(core_ctx.state.cache.size, 0);
        // Test that fragment bodies must be a multiple of
        // `FRAGMENT_BLOCK_SIZE`, except for the last fragment.

        // Process fragment #0
        process_ipv6_fragment(
            &mut core_ctx,
            &mut bindings_ctx,
            fragment_id,
            0,
            true,
            ExpectedResult::NeedMore,
        );

        // Process fragment #1 (body size is not a multiple of
        // `FRAGMENT_BLOCK_SIZE` and more flag is `true`).
        //
        // Hand-build a 40-byte IPv6 header followed by an 8-byte fragment
        // extension header (bytes 40..48), then append the fragment body.
        let mut bytes = vec![0; 48];
        // Version (6), traffic class, and flow label words.
        bytes[..4].copy_from_slice(&[0x60, 0x20, 0x00, 0x77][..]);
        bytes[6] = Ipv6ExtHdrType::Fragment.into(); // Next Header
        bytes[7] = 64; // Hop Limit
        bytes[8..24].copy_from_slice(TEST_ADDRS_V6.remote_ip.bytes()); // Source Address
        bytes[24..40].copy_from_slice(TEST_ADDRS_V6.local_ip.bytes()); // Destination Address
        bytes[40] = IpProto::Tcp.into(); // Fragment ext header: Next Header
        bytes[42] = 0; // High bits of the Fragment Offset field.
        // Fragment Offset = 1 (shifted past the reserved/M bits), M flag = 1.
        bytes[43] = (1 << 3) | 1;
        // Identification field.
        bytes[44..48].copy_from_slice(&u32::try_from(fragment_id).unwrap().to_be_bytes());
        // Body that is 1 byte short of `FRAGMENT_BLOCK_SIZE`, so it is not a
        // multiple of `FRAGMENT_BLOCK_SIZE`.
        bytes.extend(FRAGMENT_BLOCK_SIZE..FRAGMENT_BLOCK_SIZE * 2 - 1);
        // Backfill the Payload Length now that the total size is known.
        let payload_len = (bytes.len() - 40) as u16;
        bytes[4..6].copy_from_slice(&payload_len.to_be_bytes());
        let mut buf = Buf::new(bytes, ..);
        let packet = buf.parse::<Ipv6Packet<_>>().unwrap();
        // A non-final fragment with a short body is rejected outright.
        assert_matches!(
            FragmentHandler::process_fragment::<&[u8]>(&mut core_ctx, &mut bindings_ctx, packet),
            FragmentProcessingState::InvalidFragment
        );

        // Process fragment #1 (body size is not a multiple of
        // `FRAGMENT_BLOCK_SIZE` but more flag is `false`). The last fragment is
        // allowed to not be a multiple of `FRAGMENT_BLOCK_SIZE`.
        //
        // Same packet as above except the M flag is clear.
        let mut bytes = vec![0; 48];
        bytes[..4].copy_from_slice(&[0x60, 0x20, 0x00, 0x77][..]);
        bytes[6] = Ipv6ExtHdrType::Fragment.into(); // Next Header
        bytes[7] = 64; // Hop Limit
        bytes[8..24].copy_from_slice(TEST_ADDRS_V6.remote_ip.bytes()); // Source Address
        bytes[24..40].copy_from_slice(TEST_ADDRS_V6.local_ip.bytes()); // Destination Address
        bytes[40] = IpProto::Tcp.into(); // Fragment ext header: Next Header
        bytes[42] = 0; // High bits of the Fragment Offset field.
        // Fragment Offset = 1, M flag = 0 (this is the final fragment).
        bytes[43] = 1 << 3;
        // Identification field.
        bytes[44..48].copy_from_slice(&u32::try_from(fragment_id).unwrap().to_be_bytes());
        bytes.extend(FRAGMENT_BLOCK_SIZE..FRAGMENT_BLOCK_SIZE * 2 - 1);
        let payload_len = (bytes.len() - 40) as u16;
        bytes[4..6].copy_from_slice(&payload_len.to_be_bytes());
        let mut buf = Buf::new(bytes, ..);
        let packet = buf.parse::<Ipv6Packet<_>>().unwrap();
        let (key, packet_len) = assert_frag_proc_state_ready!(
            FragmentHandler::process_fragment::<&[u8]>(&mut core_ctx, &mut bindings_ctx, packet),
            TEST_ADDRS_V6.remote_ip.get(),
            TEST_ADDRS_V6.local_ip.get(),
            fragment_id,
            55
        );
        validate_size(&core_ctx.state.cache);
        // Reassemble into a caller-allocated buffer; the resulting body is
        // the concatenation of both fragments' payloads, and the cache entry
        // is released afterwards (size returns to 0).
        let mut buffer: Vec<u8> = vec![0; packet_len];
        let mut buffer = &mut buffer[..];
        FragmentHandler::reassemble_packet(&mut core_ctx, &mut bindings_ctx, &key, &mut buffer)
            .unwrap();
        let packet = Ipv6Packet::parse_mut(&mut buffer, ()).unwrap();
        let mut expected_body: Vec<u8> = Vec::new();
        expected_body.extend(0..15);
        assert_eq!(packet.body(), &expected_body[..]);
        assert_eq!(core_ctx.state.cache.size, 0);
    }
1654
    #[ip_test(I)]
    fn test_ip_reassembly_with_multiple_intertwined_packets<
        I: TestIpExt + netstack3_base::IpExt,
    >() {
        let FakeCtxImpl { mut core_ctx, mut bindings_ctx } = new_context::<I>();
        let fragment_id_0 = 5;
        let fragment_id_1 = 10;

        // Test that we properly reassemble fragmented packets when they arrive
        // intertwined with other packets' fragments. The two packets are kept
        // separate by their differing fragment IDs.

        // Process fragment #0 for packet #0
        process_ip_fragment(
            &mut core_ctx,
            &mut bindings_ctx,
            fragment_id_0,
            0,
            true,
            ExpectedResult::NeedMore,
        );

        // Process fragment #0 for packet #1
        process_ip_fragment(
            &mut core_ctx,
            &mut bindings_ctx,
            fragment_id_1,
            0,
            true,
            ExpectedResult::NeedMore,
        );

        // Process fragment #1 for packet #0
        process_ip_fragment(
            &mut core_ctx,
            &mut bindings_ctx,
            fragment_id_0,
            1,
            true,
            ExpectedResult::NeedMore,
        );

        // Process fragment #1 for packet #1
        process_ip_fragment(
            &mut core_ctx,
            &mut bindings_ctx,
            fragment_id_1,
            1,
            true,
            ExpectedResult::NeedMore,
        );

        // Process fragment #2 for packet #0 (final fragment; packet #0 is now
        // complete and ready for reassembly).
        process_ip_fragment(
            &mut core_ctx,
            &mut bindings_ctx,
            fragment_id_0,
            2,
            false,
            ExpectedResult::Ready { total_body_len: 24 },
        );

        try_reassemble_ip_packet(&mut core_ctx, &mut bindings_ctx, fragment_id_0, 24);

        // Process fragment #2 for packet #1 (final fragment; packet #1 is now
        // complete and ready for reassembly).
        process_ip_fragment(
            &mut core_ctx,
            &mut bindings_ctx,
            fragment_id_1,
            2,
            false,
            ExpectedResult::Ready { total_body_len: 24 },
        );

        try_reassemble_ip_packet(&mut core_ctx, &mut bindings_ctx, fragment_id_1, 24);
    }
1730
    #[ip_test(I)]
    fn test_ip_reassembly_timer_with_multiple_intertwined_packets<
        I: TestIpExt + netstack3_base::IpExt,
    >() {
        let FakeCtxImpl { mut core_ctx, mut bindings_ctx } = new_context::<I>();
        let fragment_id_0 = 5;
        let fragment_id_1 = 10;
        let fragment_id_2 = 15;

        // Test that reassembly timers behave properly with multiple
        // intertwined packets that all arrive out of order. We expect packets
        // #0 and #2 to succeed, and packet #1 to fail due to the reassembly
        // timer.
        //
        // The flow of events:
        //   T=0s:
        //     - Packet #0, Fragment #0 arrives (timer scheduled for T=60s).
        //     - Packet #1, Fragment #2 arrives (timer scheduled for T=60s).
        //     - Packet #2, Fragment #2 arrives (timer scheduled for T=60s).
        //   T=30s:
        //     - Packet #0, Fragment #2 arrives.
        //   T=40s:
        //     - Packet #2, Fragment #1 arrives.
        //     - Packet #0, Fragment #1 arrives (timer cancelled since all
        //       fragments arrived).
        //   T=50s:
        //     - Packet #1, Fragment #0 arrives.
        //     - Packet #2, Fragment #0 arrives (timer cancelled since all
        //       fragments arrived).
        //   T=60s:
        //     - Timeout for reassembly of Packet #1.
        //     - Packet #1, Fragment #1 arrives (final fragment but timer
        //       already triggered so fragment not complete).

        // Process fragment #0 for packet #0
        process_ip_fragment(
            &mut core_ctx,
            &mut bindings_ctx,
            fragment_id_0,
            0,
            true,
            ExpectedResult::NeedMore,
        );

        // Process fragment #2 for packet #1
        process_ip_fragment(
            &mut core_ctx,
            &mut bindings_ctx,
            fragment_id_1,
            2,
            false,
            ExpectedResult::NeedMore,
        );

        // Process fragment #2 for packet #2
        process_ip_fragment(
            &mut core_ctx,
            &mut bindings_ctx,
            fragment_id_2,
            2,
            false,
            ExpectedResult::NeedMore,
        );

        // Advance time by 30s (should be at 30s now). No timers should fire
        // yet — all three reassembly timers are scheduled for T=60s.
        assert_empty(bindings_ctx.trigger_timers_for(Duration::from_secs(30), &mut core_ctx));

        // Process fragment #2 for packet #0
        process_ip_fragment(
            &mut core_ctx,
            &mut bindings_ctx,
            fragment_id_0,
            2,
            false,
            ExpectedResult::NeedMore,
        );

        // Advance time by 10s (should be at 40s now).
        assert_empty(bindings_ctx.trigger_timers_for(Duration::from_secs(10), &mut core_ctx));

        // Process fragment #1 for packet #2
        process_ip_fragment(
            &mut core_ctx,
            &mut bindings_ctx,
            fragment_id_2,
            1,
            true,
            ExpectedResult::NeedMore,
        );

        // Process fragment #1 for packet #0 (last missing piece; packet #0 is
        // now complete).
        process_ip_fragment(
            &mut core_ctx,
            &mut bindings_ctx,
            fragment_id_0,
            1,
            true,
            ExpectedResult::Ready { total_body_len: 24 },
        );

        try_reassemble_ip_packet(&mut core_ctx, &mut bindings_ctx, fragment_id_0, 24);

        // Advance time by 10s (should be at 50s now).
        assert_empty(bindings_ctx.trigger_timers_for(Duration::from_secs(10), &mut core_ctx));

        // Process fragment #0 for packet #1
        process_ip_fragment(
            &mut core_ctx,
            &mut bindings_ctx,
            fragment_id_1,
            0,
            true,
            ExpectedResult::NeedMore,
        );

        // Process fragment #0 for packet #2 (last missing piece; packet #2 is
        // now complete).
        process_ip_fragment(
            &mut core_ctx,
            &mut bindings_ctx,
            fragment_id_2,
            0,
            true,
            ExpectedResult::Ready { total_body_len: 24 },
        );

        try_reassemble_ip_packet(&mut core_ctx, &mut bindings_ctx, fragment_id_2, 24);

        // Advance time by 10s (should be at 60s now), triggering the timer for
        // the reassembly of packet #1.
        bindings_ctx.trigger_timers_for_and_expect(
            Duration::from_secs(10),
            [FragmentTimerId::<I>::default()],
            &mut core_ctx,
        );

        // Make sure no other timers exist.
        bindings_ctx.timers.assert_no_timers_installed();

        // Process fragment #1 for packet #1. Should get a need more return
        // value since even though we technically received all the fragments,
        // the timer purged packet #1's partial state before the last fragment
        // arrived.
        process_ip_fragment(
            &mut core_ctx,
            &mut bindings_ctx,
            fragment_id_1,
            2,
            true,
            ExpectedResult::NeedMore,
        );
    }
1880
1881    #[test]
1882    fn test_no_more_fragments_in_middle_of_block() {
1883        let FakeCtxImpl { mut core_ctx, mut bindings_ctx } = new_context::<Ipv4>();
1884        process_ipv4_fragment(
1885            &mut core_ctx,
1886            &mut bindings_ctx,
1887            0,
1888            100,
1889            false,
1890            ExpectedResult::NeedMore,
1891        );
1892
1893        process_ipv4_fragment(
1894            &mut core_ctx,
1895            &mut bindings_ctx,
1896            0,
1897            50,
1898            false,
1899            ExpectedResult::Invalid,
1900        );
1901    }
1902
1903    #[ip_test(I)]
1904    fn test_cancel_timer_on_overlap<I: TestIpExt>() {
1905        const FRAGMENT_ID: u16 = 1;
1906        const FRAGMENT_OFFSET: u16 = 0;
1907        const M_FLAG: bool = true;
1908
1909        let FakeCtxImpl { mut core_ctx, mut bindings_ctx } = new_context::<I>();
1910
1911        let TestAddrs { local_ip, remote_ip, .. } = I::TEST_ADDRS;
1912        let key = FragmentCacheKey::new(remote_ip.get(), local_ip.get(), FRAGMENT_ID.into());
1913
1914        // Do this a couple times to make sure that new packets matching the
1915        // invalid packet's fragment cache key create a new entry.
1916        for _ in 0..=2 {
1917            process_ip_fragment(
1918                &mut core_ctx,
1919                &mut bindings_ctx,
1920                FRAGMENT_ID,
1921                FRAGMENT_OFFSET,
1922                M_FLAG,
1923                ExpectedResult::NeedMore,
1924            );
1925            core_ctx
1926                .state
1927                .cache
1928                .timers
1929                .assert_timers_after(&mut bindings_ctx, [(key, (), REASSEMBLY_TIMEOUT)]);
1930
1931            process_ip_fragment(
1932                &mut core_ctx,
1933                &mut bindings_ctx,
1934                FRAGMENT_ID,
1935                FRAGMENT_OFFSET,
1936                M_FLAG,
1937                ExpectedResult::Invalid,
1938            );
1939            assert_eq!(bindings_ctx.timers.timers(), [],);
1940        }
1941    }
1942}