netstack3_ip/
reassembly.rs

1// Copyright 2019 The Fuchsia Authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
5//! Module for IP fragmented packet reassembly support.
6//!
7//! `reassembly` is a utility to support reassembly of fragmented IP packets.
8//! Fragmented packets are associated by a combination of the packets' source
9//! address, destination address and identification value. When a potentially
10//! fragmented packet is received, this utility will check to see if the packet
11//! is in fact fragmented or not. If it isn't fragmented, it will be returned as
12//! is without any modification. If it is fragmented, this utility will capture
13//! its body and store it in a cache while waiting for all the fragments for a
14//! packet to arrive. The header information from a fragment with offset set to
15//! 0 will also be kept to add to the final, reassembled packet. Once this
16//! utility has received all the fragments for a combination of source address,
17//! destination address and identification value, the implementer will need to
18//! allocate a buffer of sufficient size to reassemble the final packet into and
19//! pass it to this utility. This utility will then attempt to reassemble and
20//! parse the packet, which will be returned to the caller. The caller should
21//! then handle the returned packet as a normal IP packet. Note, there is a
22//! timer from receipt of the first fragment to reassembly of the final packet.
//! See [`ReassemblyIpExt::REASSEMBLY_TIMEOUT`].
24//!
25//! Note, this utility does not support reassembly of jumbogram packets.
26//! According to the IPv6 Jumbogram RFC (RFC 2675), the jumbogram payload option
27//! is relevant only for nodes that may be attached to links with a link MTU
28//! greater than 65575 bytes. Note, the maximum size of a non-jumbogram IPv6
29//! packet is also 65575 (as the payload length field for IP packets is 16 bits
30//! + 40 byte IPv6 header). If a link supports an MTU greater than the maximum
31//! size of a non-jumbogram packet, the packet should not be fragmented.
32
33use alloc::collections::{BTreeSet, BinaryHeap};
34use alloc::vec::Vec;
35use core::cmp::Ordering;
36use core::fmt::Debug;
37use core::hash::Hash;
38use core::time::Duration;
39
40use assert_matches::assert_matches;
41use log::debug;
42use net_types::ip::{GenericOverIp, Ip, IpAddr, IpVersionMarker, Ipv4, Ipv6};
43use netstack3_base::{
44    CoreTimerContext, HandleableTimer, InstantBindingsTypes, IpExt, LocalTimerHeap,
45    TimerBindingsTypes, TimerContext,
46};
47use netstack3_hashmap::hash_map::{Entry, HashMap};
48use packet::BufferViewMut;
49use packet_formats::ip::{IpPacket, Ipv4Proto};
50use packet_formats::ipv4::{Ipv4Header, Ipv4Packet};
51use packet_formats::ipv6::Ipv6Packet;
52use packet_formats::ipv6::ext_hdrs::Ipv6ExtensionHeaderData;
53use zerocopy::{SplitByteSlice, SplitByteSliceMut};
54
/// An IP extension trait supporting reassembly of fragments.
pub trait ReassemblyIpExt: IpExt {
    /// The maximum amount of time from receipt of the first fragment to
    /// reassembly of a packet. Note, "first fragment" does not mean a fragment
    /// with offset 0; it means the first fragment packet we receive with a new
    /// combination of source address, destination address and fragment
    /// identification value.
    const REASSEMBLY_TIMEOUT: Duration;

    /// An IP specific field that should be considered part of the
    /// [`FragmentCacheKey`].
    ///
    /// E.g. the inner protocol for IPv4, and nothing (`()`) for IPv6; see the
    /// respective impls for the RFC rationale.
    type FragmentCacheKeyPart: Copy + Clone + Debug + Hash + PartialEq + Eq;

    /// Returns the IP specific portion of the [`FragmentCacheKey`] from the
    /// packet.
    fn ip_specific_key_part<B: SplitByteSlice>(
        packet: &Self::Packet<B>,
    ) -> Self::FragmentCacheKeyPart;
}
74
impl ReassemblyIpExt for Ipv4 {
    /// This value is specified in RFC 791, section 3.2:
    ///   The current recommendation for the initial timer setting is 15
    ///   seconds.
    const REASSEMBLY_TIMEOUT: Duration = Duration::from_secs(15);

    /// IPv4 considers the inner protocol to be part of the fragmentation key.
    /// From RFC 791, section 2.3:
    ///   To assemble the fragments of an internet datagram, an internet
    ///   protocol module (for example at a destination host) combines
    ///   internet datagrams that all have the same value for the four fields:
    ///   identification, source, destination, and protocol.
    type FragmentCacheKeyPart = Ipv4Proto;

    fn ip_specific_key_part<B: SplitByteSlice>(
        packet: &Self::Packet<B>,
    ) -> Self::FragmentCacheKeyPart {
        // Fully-qualified call to the `IpPacket` trait accessor for the inner
        // protocol number.
        IpPacket::proto(packet)
    }
}
95
96impl ReassemblyIpExt for Ipv6 {
97    /// This value is specified in RFC 8200, section 4.5:
98    ///   If insufficient fragments are received to complete reassembly
99    ///   of a packet within 60 seconds of the reception of the first-
100    ///   arriving fragment of that packet, reassembly of that packet
101    ///   must be abandoned and all the fragments that have been received
102    ///   for that packet must be discarded.
103    const REASSEMBLY_TIMEOUT: Duration = Duration::from_secs(60);
104
105    /// Unlike IPv4, IPv6 allows reassembling fragments that have different
106    /// inner protocols. From RFC 8200, section 4.5:
107    ///   The Next Header values in the Fragment headers of different
108    ///   fragments of the same original packet may differ.  Only the value
109    ///   from the Offset zero fragment packet is used for reassembly.
110    type FragmentCacheKeyPart = ();
111
112    fn ip_specific_key_part<B: SplitByteSlice>(
113        _packet: &Self::Packet<B>,
114    ) -> Self::FragmentCacheKeyPart {
115        ()
116    }
117}
118
/// Number of bytes per fragment block for IPv4 and IPv6.
///
/// IPv4 outlines the fragment block size in RFC 791 section 3.1, under the
/// fragment offset field's description: "The fragment offset is measured in
/// units of 8 octets (64 bits)".
///
/// IPv6 outlines the fragment block size in RFC 8200 section 4.5, under the
/// fragment offset field's description: "The offset, in 8-octet units, of the
/// data following this header".
const FRAGMENT_BLOCK_SIZE: u8 = 8;

/// Maximum number of fragment blocks an IPv4 or IPv6 packet can have.
///
/// We use this value because both IPv4 fixed header's fragment offset field and
/// IPv6 fragment extension header's fragment offset field are 13 bits wide,
/// i.e. 2^13 - 1 = 8191.
const MAX_FRAGMENT_BLOCKS: u16 = 8191;

/// Maximum number of bytes of all currently cached fragments per IP protocol.
///
/// If the current cache size is less than this number, a new fragment can be
/// cached (even if this will result in the total cache size exceeding this
/// threshold). If the current cache size >= this number, the incoming fragment
/// will be dropped.
const MAX_FRAGMENT_CACHE_SIZE: usize = 4 * 1024 * 1024;
143
/// The state context for the fragment cache.
pub trait FragmentContext<I: Ip, BT: FragmentBindingsTypes> {
    /// Calls the callback with a mutable reference to the fragment cache and
    /// returns the callback's result.
    fn with_state_mut<O, F: FnOnce(&mut IpPacketFragmentCache<I, BT>) -> O>(&mut self, cb: F) -> O;
}
149
/// The bindings types for IP packet fragment reassembly.
///
/// Blanket-implemented for any type providing timer and instant support.
pub trait FragmentBindingsTypes: TimerBindingsTypes + InstantBindingsTypes {}
impl<BT> FragmentBindingsTypes for BT where BT: TimerBindingsTypes + InstantBindingsTypes {}

/// The bindings execution context for IP packet fragment reassembly.
///
/// Blanket-implemented for any [`FragmentBindingsTypes`] that can also
/// schedule timers.
pub trait FragmentBindingsContext: TimerContext + FragmentBindingsTypes {}
impl<BC> FragmentBindingsContext for BC where BC: TimerContext + FragmentBindingsTypes {}
157
/// The timer ID for the fragment cache.
///
/// Carries no data beyond the IP version marker; the individual per-key
/// reassembly deadlines are tracked by the cache's `LocalTimerHeap` keyed on
/// [`FragmentCacheKey`].
#[derive(Hash, Eq, PartialEq, Default, Clone, Debug, GenericOverIp)]
#[generic_over_ip(I, Ip)]
pub struct FragmentTimerId<I: Ip>(IpVersionMarker<I>);
162
/// An implementation of a fragment cache.
pub trait FragmentHandler<I: ReassemblyIpExt, BC> {
    /// Attempts to process a packet fragment.
    ///
    /// Non-fragmented packets are returned unmodified; fragments are cached
    /// until the whole packet is ready to be reassembled. See
    /// [`FragmentProcessingState`] for the possible outcomes.
    ///
    /// # Panics
    ///
    /// Panics if the packet has no fragment data.
    fn process_fragment<B: SplitByteSlice>(
        &mut self,
        bindings_ctx: &mut BC,
        packet: I::Packet<B>,
    ) -> FragmentProcessingState<I, B>
    where
        I::Packet<B>: FragmentablePacket;

    /// Attempts to reassemble a packet.
    ///
    /// Attempts to reassemble a packet associated with a given
    /// `FragmentCacheKey`, `key`, and cancels the timer to reset reassembly
    /// data. The caller is expected to allocate a buffer of sufficient size
    /// (available from `process_fragment` when it returns a
    /// `FragmentProcessingState::Ready` value) and provide it to
    /// `reassemble_packet` as `buffer` where the packet will be reassembled
    /// into.
    ///
    /// # Errors
    ///
    /// Returns a [`FragmentReassemblyError`] if `key` is unknown, if
    /// fragments are still missing, or if the reassembled bytes fail to parse
    /// as a valid packet.
    ///
    /// # Panics
    ///
    /// Panics if the provided `buffer` does not have enough capacity for the
    /// reassembled packet. Also panics if a different `ctx` is passed to
    /// `reassemble_packet` from the one passed to `process_fragment` when
    /// processing a packet with a given `key` as `reassemble_packet` will fail
    /// to cancel the reassembly timer.
    fn reassemble_packet<B: SplitByteSliceMut, BV: BufferViewMut<B>>(
        &mut self,
        bindings_ctx: &mut BC,
        key: &FragmentCacheKey<I>,
        buffer: BV,
    ) -> Result<(), FragmentReassemblyError>;
}
202
203impl<I: IpExt + ReassemblyIpExt, BC: FragmentBindingsContext, CC: FragmentContext<I, BC>>
204    FragmentHandler<I, BC> for CC
205{
206    fn process_fragment<B: SplitByteSlice>(
207        &mut self,
208        bindings_ctx: &mut BC,
209        packet: I::Packet<B>,
210    ) -> FragmentProcessingState<I, B>
211    where
212        I::Packet<B>: FragmentablePacket,
213    {
214        self.with_state_mut(|cache| {
215            let (res, timer_action) = cache.process_fragment(packet);
216
217            if let Some(timer_action) = timer_action {
218                match timer_action {
219                    // TODO(https://fxbug.dev/414413500): for IPv4, use the
220                    // fragment's TTL to determine the timeout.
221                    CacheTimerAction::CreateNewTimer(key) => {
222                        assert_eq!(
223                            cache.timers.schedule_after(
224                                bindings_ctx,
225                                key,
226                                (),
227                                I::REASSEMBLY_TIMEOUT,
228                            ),
229                            None
230                        )
231                    }
232                    CacheTimerAction::CancelExistingTimer(key) => {
233                        assert_ne!(cache.timers.cancel(bindings_ctx, &key), None)
234                    }
235                }
236            }
237
238            res
239        })
240    }
241
242    fn reassemble_packet<B: SplitByteSliceMut, BV: BufferViewMut<B>>(
243        &mut self,
244        bindings_ctx: &mut BC,
245        key: &FragmentCacheKey<I>,
246        buffer: BV,
247    ) -> Result<(), FragmentReassemblyError> {
248        self.with_state_mut(|cache| {
249            let res = cache.reassemble_packet(key, buffer);
250
251            match res {
252                Ok(_) | Err(FragmentReassemblyError::PacketParsingError) => {
253                    // Cancel the reassembly timer as we attempt reassembly which
254                    // means we had all the fragments for the final packet, even
255                    // if parsing the reassembled packet failed.
256                    assert_matches!(cache.timers.cancel(bindings_ctx, key), Some(_));
257                }
258                Err(FragmentReassemblyError::InvalidKey)
259                | Err(FragmentReassemblyError::MissingFragments) => {}
260            }
261
262            res
263        })
264    }
265}
266
267impl<I: ReassemblyIpExt, BC: FragmentBindingsContext, CC: FragmentContext<I, BC>>
268    HandleableTimer<CC, BC> for FragmentTimerId<I>
269{
270    fn handle(self, core_ctx: &mut CC, bindings_ctx: &mut BC, _: BC::UniqueTimerId) {
271        let Self(IpVersionMarker { .. }) = self;
272        core_ctx.with_state_mut(|cache| {
273            let Some((key, ())) = cache.timers.pop(bindings_ctx) else {
274                return;
275            };
276
277            // If a timer fired, the `key` must still exist in our fragment cache.
278            let FragmentCacheData { missing_blocks: _, body_fragments, header: _, total_size } =
279                assert_matches!(cache.remove_data(&key), Some(c) => c);
280            debug!(
281                "reassembly for {key:?} \
282                timed out with {} fragments and {total_size} bytes",
283                body_fragments.len(),
284            );
285        });
286    }
287}
288
/// Trait that must be implemented by any packet type that is fragmentable.
pub trait FragmentablePacket {
    /// Return fragment identifier data.
    ///
    /// Returns the fragment identification, offset and more flag as `(a, b, c)`
    /// where `a` is the fragment identification value, `b` is the fragment
    /// offset (in units of [`FRAGMENT_BLOCK_SIZE`] bytes) and `c` is the more
    /// flag.
    ///
    /// # Panics
    ///
    /// Panics if the packet has no fragment data.
    fn fragment_data(&self) -> (u32, u16, bool);
}
302
303impl<B: SplitByteSlice> FragmentablePacket for Ipv4Packet<B> {
304    fn fragment_data(&self) -> (u32, u16, bool) {
305        (u32::from(self.id()), self.fragment_offset().into_raw(), self.mf_flag())
306    }
307}
308
309impl<B: SplitByteSlice> FragmentablePacket for Ipv6Packet<B> {
310    fn fragment_data(&self) -> (u32, u16, bool) {
311        for ext_hdr in self.iter_extension_hdrs() {
312            if let Ipv6ExtensionHeaderData::Fragment { fragment_data } = ext_hdr.data() {
313                return (
314                    fragment_data.identification(),
315                    fragment_data.fragment_offset().into_raw(),
316                    fragment_data.m_flag(),
317                );
318            }
319        }
320
321        unreachable!(
322            "Should never call this function if the packet does not have a fragment header"
323        );
324    }
325}
326
/// Possible return values for [`IpPacketFragmentCache::process_fragment`].
#[derive(Debug)]
pub enum FragmentProcessingState<I: ReassemblyIpExt, B: SplitByteSlice> {
    /// The provided packet is not fragmented so no processing is required.
    /// The packet is returned with this value without any modification.
    NotNeeded(I::Packet<B>),

    /// The provided packet is fragmented but it is malformed.
    ///
    /// Possible reasons for being malformed are:
    ///  1) Body is not a multiple of `FRAGMENT_BLOCK_SIZE` and it is not the
    ///     last fragment (last fragment of a packet, not last fragment received
    ///     for a packet).
    ///  2) Overlaps with an existing fragment. This is explicitly not allowed
    ///     for IPv6 as per RFC 8200 section 4.5 (more details in RFC 5722). We
    ///     choose the same behaviour for IPv4 for the same reasons.
    ///  3) Packet's fragment offset + # of fragment blocks >
    ///     `MAX_FRAGMENT_BLOCKS`.
    InvalidFragment,

    /// Successfully processed the provided fragment. We are still waiting on
    /// more fragments for a packet to arrive before being ready to reassemble
    /// the packet.
    NeedMoreFragments,

    /// Cannot process the fragment because `MAX_FRAGMENT_CACHE_SIZE` is
    /// reached.
    OutOfMemory,

    /// Successfully processed the provided fragment. We now have all the
    /// fragments we need to reassemble the packet. The caller must create a
    /// buffer with capacity for at least `packet_len` bytes and provide the
    /// buffer and `key` to `reassemble_packet`.
    Ready {
        /// The cache key identifying the packet's cached fragments.
        key: FragmentCacheKey<I>,
        /// The length, in bytes, of the fully reassembled packet.
        packet_len: usize,
    },
}
362
/// Possible errors when attempting to reassemble a packet.
#[derive(Debug, PartialEq, Eq)]
pub enum FragmentReassemblyError {
    /// At least one fragment for a packet has not arrived.
    MissingFragments,

    /// A `FragmentCacheKey` is not associated with any packet. This could be
    /// because either no fragment has yet arrived for a packet associated with
    /// a `FragmentCacheKey` or some fragments did arrive, but the reassembly
    /// timer expired and the cached data got discarded.
    InvalidKey,

    /// Packet parsing error.
    PacketParsingError,
}
378
/// Fragment Cache Key.
///
/// Composed of the original packet's source address, destination address,
/// fragment id, and an IP-version-specific part (see
/// [`ReassemblyIpExt::FragmentCacheKeyPart`]).
#[derive(Copy, Clone, Debug, Hash, PartialEq, Eq)]
pub struct FragmentCacheKey<I: ReassemblyIpExt> {
    src_ip: I::Addr,
    dst_ip: I::Addr,
    fragment_id: u32,
    ip_specific_fields: I::FragmentCacheKeyPart,
}
390
/// An inclusive-inclusive range of fragment blocks within a reassembled
/// packet (block indices, not bytes; see [`FRAGMENT_BLOCK_SIZE`]).
// NOTE: We use this instead of `std::ops::RangeInclusive` because the latter
// provides getter methods which return references, and it adds a lot of
// unnecessary dereferences.
#[derive(Copy, Clone, Debug, Eq, PartialEq, PartialOrd, Ord)]
struct BlockRange {
    /// The first fragment block in the range (inclusive).
    start: u16,
    /// The last fragment block in the range (inclusive).
    end: u16,
}
400
/// Data required for fragmented packet reassembly.
#[derive(Debug)]
struct FragmentCacheData {
    /// List of non-overlapping inclusive ranges of fragment blocks required
    /// before being ready to reassemble a packet.
    ///
    /// When creating a new instance of `FragmentCacheData`, we will set
    /// `missing_blocks` to a list with a single element representing all
    /// blocks, (0, MAX_VALUE). In this case, MAX_VALUE will be set to
    /// `u16::MAX`.
    missing_blocks: BTreeSet<BlockRange>,

    /// Received fragment blocks.
    ///
    /// We use a binary heap for help when reassembling packets. When we
    /// reassemble packets, we will want to fill up a new buffer with all the
    /// body fragments. The easiest way to do this is in order, from the
    /// fragment with offset 0 to the fragment with the highest offset. Since we
    /// only need to enforce the order when reassembling, we use a min-heap so
    /// we have a defined order (increasing fragment offset values) when
    /// popping. `BinaryHeap` is technically a max-heap, but we use the negative
    /// of the offset values as the key for the heap. See
    /// [`PacketBodyFragment::new`].
    body_fragments: BinaryHeap<PacketBodyFragment>,

    /// The header data for the reassembled packet.
    ///
    /// The header of the fragment packet with offset 0 will be used as the
    /// header for the final, reassembled packet.
    header: Option<Vec<u8>>,

    /// Total number of bytes in the reassembled packet.
    ///
    /// This is used so that we don't have to iterate through `body_fragments`
    /// and sum the partial body sizes to calculate the reassembled packet's
    /// size.
    total_size: usize,
}
439
440impl Default for FragmentCacheData {
441    fn default() -> FragmentCacheData {
442        FragmentCacheData {
443            missing_blocks: core::iter::once(BlockRange { start: 0, end: u16::MAX }).collect(),
444            body_fragments: BinaryHeap::new(),
445            header: None,
446            total_size: 0,
447        }
448    }
449}
450
impl FragmentCacheData {
    /// Attempts to find a gap where the provided `BlockRange` will fit in.
    ///
    /// Classifies the range against the current set of `missing_blocks`: it
    /// either fits entirely within a single gap, partially overlaps data that
    /// was already received, lies beyond the known end of the packet, or is a
    /// duplicate of already-received data. See [`FindGapResult`].
    fn find_gap(&self, BlockRange { start, end }: BlockRange) -> FindGapResult {
        let result = self.missing_blocks.iter().find_map(|gap| {
            // This gap completely contains the provided range.
            if gap.start <= start && gap.end >= end {
                return Some(FindGapResult::Found { gap: *gap });
            }

            // This gap is completely disjoint from the provided range.
            // Ignore it.
            if gap.start > end || gap.end < start {
                return None;
            }

            // If neither of the above are true, this gap must overlap with
            // the provided range.
            return Some(FindGapResult::Overlap);
        });

        match result {
            Some(result) => result,
            None => {
                // Searching the missing blocks didn't find a suitable gap nor
                // an overlap. Check for an out-of-bounds range before
                // concluding that this range must be a duplicate.

                // Note: `last` *must* exist and *must* represent the final
                // fragment. If we had not yet received the final fragment, the
                // search through the `missing_blocks` would be guaranteed to
                // return `Some` (because it would contain a range with an end
                // equal to u16::Max).
                //
                // NOTE(review): `body_fragments` is documented (on its field)
                // as popping in *increasing* offset order, i.e. `peek()` would
                // yield the lowest-offset fragment rather than the final one.
                // `PacketBodyFragment`'s ordering is not visible here — confirm
                // that `peek()` actually returns the final fragment, otherwise
                // this misclassifies duplicates as out-of-bounds.
                let last = self.body_fragments.peek().unwrap();
                if last.offset < start {
                    FindGapResult::OutOfBounds
                } else {
                    FindGapResult::Duplicate
                }
            }
        }
    }
}
493
/// The result of calling [`FragmentCacheData::find_gap`].
enum FindGapResult {
    /// The provided `BlockRange` fits inside of an existing gap. The gap may be
    /// completely or partially filled by the provided `BlockRange`.
    Found {
        /// The gap that fully contains the provided `BlockRange`.
        gap: BlockRange,
    },
    /// The provided `BlockRange` overlaps with data we've already received.
    /// Specifically, an overlap occurs if the provided `BlockRange` is partially
    /// contained within a gap.
    Overlap,
    /// The provided `BlockRange` has an end beyond the known end of the packet.
    OutOfBounds,
    /// The provided `BlockRange` has already been received. Specifically, a
    /// duplicate occurs if the provided `BlockRange` is completely disjoint from
    /// all known gaps.
    ///
    /// RFC 8200, Section 4.5 states:
    ///   It should be noted that fragments may be duplicated in the
    ///   network.  Instead of treating these exact duplicate fragments
    ///   as overlapping fragments, an implementation may choose to
    ///   detect this case and drop exact duplicate fragments while
    ///   keeping the other fragments belonging to the same packet.
    ///
    /// Here we take a loose interpretation of "exact" and choose not to verify
    /// that the *data* contained within the fragment matches the previously
    /// received data. This is in the spirit of reducing the work performed by
    /// the assembler, and is in line with the behavior of other platforms.
    Duplicate,
}
524
/// A cache of inbound IP packet fragments.
#[derive(Debug)]
pub struct IpPacketFragmentCache<I: ReassemblyIpExt, BT: FragmentBindingsTypes> {
    /// Per-packet reassembly state, keyed by (src, dst, fragment id, and any
    /// IP-version-specific fields).
    cache: HashMap<FragmentCacheKey<I>, FragmentCacheData>,
    /// Size of currently cached fragment data, compared against `threshold`
    /// to bound memory use.
    size: usize,
    /// Cache size limit; initialized to [`MAX_FRAGMENT_CACHE_SIZE`].
    threshold: usize,
    /// One reassembly deadline per active [`FragmentCacheKey`].
    timers: LocalTimerHeap<FragmentCacheKey<I>, (), BT>,
}
533
534impl<I: ReassemblyIpExt, BC: FragmentBindingsContext> IpPacketFragmentCache<I, BC> {
535    /// Creates a new `IpFragmentCache`.
536    pub fn new<CC: CoreTimerContext<FragmentTimerId<I>, BC>>(
537        bindings_ctx: &mut BC,
538    ) -> IpPacketFragmentCache<I, BC> {
539        IpPacketFragmentCache {
540            cache: HashMap::new(),
541            size: 0,
542            threshold: MAX_FRAGMENT_CACHE_SIZE,
543            timers: LocalTimerHeap::new(bindings_ctx, CC::convert_timer(Default::default())),
544        }
545    }
546}
547
/// Tells the caller of [`IpPacketFragmentCache::process_fragment`] how to
/// update timer state for a fragment cache key.
enum CacheTimerAction<I: ReassemblyIpExt> {
    /// A first fragment for the key was cached; schedule its reassembly timer.
    CreateNewTimer(FragmentCacheKey<I>),
    /// The key's cached state was dropped; cancel its scheduled timer.
    CancelExistingTimer(FragmentCacheKey<I>),
}
552
553impl<I: ReassemblyIpExt, BT: FragmentBindingsTypes> IpPacketFragmentCache<I, BT> {
554    /// Attempts to process a packet fragment.
555    ///
556    /// # Panics
557    ///
558    /// Panics if the packet has no fragment data.
559    fn process_fragment<B: SplitByteSlice>(
560        &mut self,
561        packet: I::Packet<B>,
562    ) -> (FragmentProcessingState<I, B>, Option<CacheTimerAction<I>>)
563    where
564        I::Packet<B>: FragmentablePacket,
565    {
566        if self.above_size_threshold() {
567            return (FragmentProcessingState::OutOfMemory, None);
568        }
569
570        // Get the fragment data.
571        let (id, offset, m_flag) = packet.fragment_data();
572
573        // Check if `packet` is actually fragmented. We know it is not
574        // fragmented if the fragment offset is 0 (contains first fragment) and
575        // we have no more fragments. This means the first fragment is the only
576        // fragment, implying we have a full packet.
577        if offset == 0 && !m_flag {
578            return (FragmentProcessingState::NotNeeded(packet), None);
579        }
580
581        // Make sure packet's body isn't empty. Since at this point we know that
582        // the packet is definitely fragmented (`offset` is not 0 or `m_flag` is
583        // `true`), we simply let the caller know we need more fragments. This
584        // should never happen, but just in case :).
585        if packet.body().is_empty() {
586            return (FragmentProcessingState::NeedMoreFragments, None);
587        }
588
589        // Make sure body is a multiple of `FRAGMENT_BLOCK_SIZE` bytes, or
590        // `packet` contains the last fragment block which is allowed to be less
591        // than `FRAGMENT_BLOCK_SIZE` bytes.
592        if m_flag && (packet.body().len() % (FRAGMENT_BLOCK_SIZE as usize) != 0) {
593            return (FragmentProcessingState::InvalidFragment, None);
594        }
595
596        // Key used to find this connection's fragment cache data.
597        let key = FragmentCacheKey {
598            src_ip: packet.src_ip(),
599            dst_ip: packet.dst_ip(),
600            fragment_id: id,
601            ip_specific_fields: I::ip_specific_key_part(&packet),
602        };
603
604        // The number of fragment blocks `packet` contains.
605        //
606        // Note, we are calculating the ceiling of an integer division.
607        // Essentially:
608        //     ceil(packet.body.len() / FRAGMENT_BLOCK_SIZE)
609        //
610        // We need to calculate the ceiling of the division because the final
611        // fragment block for a reassembled packet is allowed to contain less
612        // than `FRAGMENT_BLOCK_SIZE` bytes.
613        //
614        // We know `packet.body().len() - 1` will never be less than 0 because
615        // we already made sure that `packet`'s body is not empty, and it is
616        // impossible to have a negative body size.
617        let num_fragment_blocks = 1 + ((packet.body().len() - 1) / (FRAGMENT_BLOCK_SIZE as usize));
618        assert!(num_fragment_blocks > 0);
619
620        // The range of fragment blocks `packet` contains.
621        //
622        // The maximum number of fragment blocks a reassembled packet is allowed
623        // to contain is `MAX_FRAGMENT_BLOCKS` so we make sure that the fragment
624        // we received does not violate this.
625        let fragment_blocks_range =
626            if let Ok(offset_end) = u16::try_from((offset as usize) + num_fragment_blocks - 1) {
627                if offset_end <= MAX_FRAGMENT_BLOCKS {
628                    BlockRange { start: offset, end: offset_end }
629                } else {
630                    return (FragmentProcessingState::InvalidFragment, None);
631                }
632            } else {
633                return (FragmentProcessingState::InvalidFragment, None);
634            };
635
636        // Get (or create) the fragment cache data.
637        let (fragment_data, timer_not_yet_scheduled) = self.get_or_create(key);
638
639        // Find the gap where `packet` belongs.
640        let found_gap = match fragment_data.find_gap(fragment_blocks_range) {
641            FindGapResult::Overlap | FindGapResult::OutOfBounds => {
642                // Drop all reassembly data as per RFC 8200 section 4.5 (IPv6).
643                // See RFC 5722 for more information.
644                //
645                // IPv4 (RFC 791) does not specify what to do for overlapped
646                // fragments. RFC 1858 section 4.2 outlines a way to prevent an
647                // overlapping fragment attack for IPv4, but this is primarily
648                // for IP filtering since "no standard requires that an
649                // overlap-safe reassemble algorithm be used" on hosts. In
650                // practice, non-malicious nodes should not intentionally send
651                // data for the same fragment block multiple times, so we will
652                // do the same thing as IPv6 in this case.
653                assert_matches!(self.remove_data(&key), Some(_));
654
655                return (
656                    FragmentProcessingState::InvalidFragment,
657                    (!timer_not_yet_scheduled)
658                        .then_some(CacheTimerAction::CancelExistingTimer(key)),
659                );
660            }
661            FindGapResult::Duplicate => {
662                // Ignore duplicate fragments as per RFC 8200 section 4.5
663                // (IPv6):
664                //   It should be noted that fragments may be duplicated in the
665                //   network.  Instead of treating these exact duplicate fragments
666                //   as overlapping fragments, an implementation may choose to
667                //   detect this case and drop exact duplicate fragments while
668                //   keeping the other fragments belonging to the same packet.
669                //
670                // Ipv4 (RFC 791) does not specify what to do for duplicate
671                // fragments. As such we choose to do the same as IPv6 in this
672                // case.
673                return (FragmentProcessingState::NeedMoreFragments, None);
674            }
675            FindGapResult::Found { gap } => gap,
676        };
677
678        let timer_id = timer_not_yet_scheduled.then_some(CacheTimerAction::CreateNewTimer(key));
679
680        // Remove `found_gap` since the gap as it exists will no longer be
681        // valid.
682        assert!(fragment_data.missing_blocks.remove(&found_gap));
683
684        // If the received fragment blocks start after the beginning of
685        // `found_gap`, create a new gap between the beginning of `found_gap`
686        // and the first fragment block contained in `packet`.
687        //
688        // Example:
689        //   `packet` w/ fragments [4, 7]
690        //                 |-----|-----|-----|-----|
691        //                    4     5     6     7
692        //
693        //   `found_gap` w/ fragments [X, 7] where 0 <= X < 4
694        //     |-----| ... |-----|-----|-----|-----|
695        //        X    ...    4     5     6     7
696        //
697        //   Here we can see that with a `found_gap` of [2, 7], `packet` covers
698        //   [4, 7] but we are still missing [X, 3] so we create a new gap of
699        //   [X, 3].
700        if found_gap.start < fragment_blocks_range.start {
701            assert!(fragment_data.missing_blocks.insert(BlockRange {
702                start: found_gap.start,
703                end: fragment_blocks_range.start - 1
704            }));
705        }
706
707        // If the received fragment blocks end before the end of `found_gap` and
708        // we expect more fragments, create a new gap between the last fragment
709        // block contained in `packet` and the end of `found_gap`.
710        //
711        // Example 1:
712        //   `packet` w/ fragments [4, 7] & m_flag = true
713        //     |-----|-----|-----|-----|
714        //        4     5     6     7
715        //
716        //   `found_gap` w/ fragments [4, Y] where 7 < Y <= `MAX_FRAGMENT_BLOCKS`.
717        //     |-----|-----|-----|-----| ... |-----|
718        //        4     5     6     7    ...    Y
719        //
720        //   Here we can see that with a `found_gap` of [4, Y], `packet` covers
721        //   [4, 7] but we still expect more fragment blocks after the blocks in
722        //   `packet` (as noted by `m_flag`) so we are still missing [8, Y] so
723        //   we create a new gap of [8, Y].
724        //
725        // Example 2:
726        //   `packet` w/ fragments [4, 7] & m_flag = false
727        //     |-----|-----|-----|-----|
728        //        4     5     6     7
729        //
730        //   `found_gap` w/ fragments [4, Y] where MAX = `MAX_FRAGMENT_BLOCKS`.
731        //     |-----|-----|-----|-----| ... |-----|
732        //        4     5     6     7    ...   MAX
733        //
734        //   Here we can see that with a `found_gap` of [4, MAX], `packet`
735        //   covers [4, 7] and we don't expect more fragment blocks after the
736        //   blocks in `packet` (as noted by `m_flag`) so we don't create a new
737        //   gap. Note, if we encounter a `packet` where `m_flag` is false,
738        //   `found_gap`'s end value must be MAX because we should only ever not
739        //   create a new gap where the end is MAX when we are processing a
740        //   packet with the last fragment block.
741        if found_gap.end > fragment_blocks_range.end && m_flag {
742            assert!(
743                fragment_data.missing_blocks.insert(BlockRange {
744                    start: fragment_blocks_range.end + 1,
745                    end: found_gap.end
746                })
747            );
748        } else if found_gap.end > fragment_blocks_range.end && !m_flag && found_gap.end < u16::MAX {
749            // There is another fragment after this one that is already present
750            // in the cache. That means that this fragment can't be the last
751            // one (must have `m_flag` set).
752            return (FragmentProcessingState::InvalidFragment, timer_id);
753        } else {
754            // Make sure that if we are not adding a fragment after the packet,
755            // it is because `packet` goes up to the `found_gap`'s end boundary,
756            // or this is the last fragment. If it is the last fragment for a
757            // packet, we make sure that `found_gap`'s end value is
758            // `core::u16::MAX`.
759            assert!(
760                found_gap.end == fragment_blocks_range.end
761                    || (!m_flag && found_gap.end == u16::MAX),
762                "found_gap: {:?}, fragment_blocks_range: {:?} offset: {:?}, m_flag: {:?}",
763                found_gap,
764                fragment_blocks_range,
765                offset,
766                m_flag
767            );
768        }
769
770        let mut added_bytes = 0;
771        // Get header buffer from `packet` if its fragment offset equals to 0.
772        if offset == 0 {
773            assert_eq!(fragment_data.header, None);
774            let header = get_header::<B, I>(&packet);
775            added_bytes = header.len();
776            fragment_data.header = Some(header);
777        }
778
779        // Add our `packet`'s body to the store of body fragments.
780        let mut body = Vec::with_capacity(packet.body().len());
781        body.extend_from_slice(packet.body());
782        added_bytes += body.len();
783        fragment_data.total_size += added_bytes;
784        fragment_data.body_fragments.push(PacketBodyFragment::new(offset, body));
785
786        // If we still have missing fragments, let the caller know that we are
787        // still waiting on some fragments. Otherwise, we let them know we are
788        // ready to reassemble and give them a key and the final packet length
789        // so they can allocate a sufficient buffer and call
790        // `reassemble_packet`.
791        let result = if fragment_data.missing_blocks.is_empty() {
792            FragmentProcessingState::Ready { key, packet_len: fragment_data.total_size }
793        } else {
794            FragmentProcessingState::NeedMoreFragments
795        };
796
797        self.increment_size(added_bytes);
798        (result, timer_id)
799    }
800
801    /// Attempts to reassemble a packet.
802    ///
803    /// Attempts to reassemble a packet associated with a given
804    /// `FragmentCacheKey`, `key`, and cancels the timer to reset reassembly
805    /// data. The caller is expected to allocate a buffer of sufficient size
806    /// (available from `process_fragment` when it returns a
807    /// `FragmentProcessingState::Ready` value) and provide it to
808    /// `reassemble_packet` as `buffer` where the packet will be reassembled
809    /// into.
810    ///
811    /// # Panics
812    ///
813    /// Panics if the provided `buffer` does not have enough capacity for the
814    /// reassembled packet. Also panics if a different `ctx` is passed to
815    /// `reassemble_packet` from the one passed to `process_fragment` when
816    /// processing a packet with a given `key` as `reassemble_packet` will fail
817    /// to cancel the reassembly timer.
818    fn reassemble_packet<B: SplitByteSliceMut, BV: BufferViewMut<B>>(
819        &mut self,
820        key: &FragmentCacheKey<I>,
821        buffer: BV,
822    ) -> Result<(), FragmentReassemblyError> {
823        let entry = match self.cache.entry(*key) {
824            Entry::Occupied(entry) => entry,
825            Entry::Vacant(_) => return Err(FragmentReassemblyError::InvalidKey),
826        };
827
828        // Make sure we are not missing fragments.
829        if !entry.get().missing_blocks.is_empty() {
830            return Err(FragmentReassemblyError::MissingFragments);
831        }
832        // Remove the entry from the cache now that we've validated that we will
833        // be able to reassemble it.
834        let (_key, data) = entry.remove_entry();
835        self.size -= data.total_size;
836
837        // If we are not missing fragments, we must have header data.
838        assert_matches!(data.header, Some(_));
839
840        // TODO(https://github.com/rust-lang/rust/issues/59278): Use
841        // `BinaryHeap::into_iter_sorted`.
842        let body_fragments = data.body_fragments.into_sorted_vec().into_iter().map(|x| x.data);
843        I::Packet::reassemble_fragmented_packet(buffer, data.header.unwrap(), body_fragments)
844            .map_err(|_| FragmentReassemblyError::PacketParsingError)
845    }
846
847    /// Gets or creates a new entry in the cache for a given `key`.
848    ///
849    /// Returns a tuple whose second component indicates whether a reassembly
850    /// timer needs to be scheduled.
851    fn get_or_create(&mut self, key: FragmentCacheKey<I>) -> (&mut FragmentCacheData, bool) {
852        match self.cache.entry(key) {
853            Entry::Occupied(e) => (e.into_mut(), false),
854            Entry::Vacant(e) => {
855                // We have no reassembly data yet so this fragment is the first
856                // one associated with the given `key`. Create a new entry in
857                // the hash table and let the caller know to schedule a timer to
858                // reset the entry.
859                (e.insert(FragmentCacheData::default()), true)
860            }
861        }
862    }
863
    /// Returns whether the cache's buffered-byte count has reached (or
    /// exceeded) its configured threshold.
    fn above_size_threshold(&self) -> bool {
        self.size >= self.threshold
    }
867
    /// Accounts for `sz` additional bytes of buffered fragment data.
    ///
    /// Callers must only add data while the cache is below its size
    /// threshold; growing a cache already at its threshold is a bug.
    fn increment_size(&mut self, sz: usize) {
        assert!(!self.above_size_threshold());
        self.size += sz;
    }
872
873    fn remove_data(&mut self, key: &FragmentCacheKey<I>) -> Option<FragmentCacheData> {
874        let data = self.cache.remove(key)?;
875        self.size -= data.total_size;
876        Some(data)
877    }
878}
879
880/// Gets the header bytes for a packet.
881fn get_header<B: SplitByteSlice, I: IpExt>(packet: &I::Packet<B>) -> Vec<u8> {
882    match packet.as_ip_addr_ref() {
883        IpAddr::V4(packet) => packet.copy_header_bytes_for_fragment(),
884        IpAddr::V6(packet) => {
885            // We are guaranteed not to panic here because we will only panic if
886            // `packet` does not have a fragment extension header. We can only get
887            // here if `packet` is a fragment packet, so we know that `packet` has a
888            // fragment extension header.
889            packet.copy_header_bytes_for_fragment()
890        }
891    }
892}
893
/// A fragment of a packet's body.
#[derive(Debug, PartialEq, Eq)]
struct PacketBodyFragment {
    // Position of this fragment within the packet body, in fragment blocks.
    offset: u16,
    // The raw bytes of this body fragment.
    data: Vec<u8>,
}

impl PacketBodyFragment {
    /// Constructs a new `PacketBodyFragment` to be stored in a `BinaryHeap`.
    fn new(offset: u16, data: Vec<u8>) -> Self {
        Self { offset, data }
    }
}

// `PacketBodyFragment`s order solely by fragment offset so that a heap of
// them yields fragments in body order.
impl PartialOrd for PacketBodyFragment {
    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
        Some(Ord::cmp(self, other))
    }
}

impl Ord for PacketBodyFragment {
    fn cmp(&self, other: &Self) -> Ordering {
        Ord::cmp(&self.offset, &other.offset)
    }
}
921
922#[cfg(test)]
923mod tests {
924    use alloc::vec;
925
926    use assert_matches::assert_matches;
927    use ip_test_macro::ip_test;
928    use net_declare::{net_ip_v4, net_ip_v6};
929    use net_types::Witness;
930    use net_types::ip::{Ipv4, Ipv4Addr, Ipv6, Ipv6Addr};
931    use netstack3_base::testutil::{
932        FakeBindingsCtx, FakeCoreCtx, FakeInstant, FakeTimerCtxExt, TEST_ADDRS_V4, TEST_ADDRS_V6,
933        assert_empty,
934    };
935    use netstack3_base::{CtxPair, IntoCoreTimerCtx};
936    use packet::{Buf, PacketBuilder, ParsablePacket, ParseBuffer, Serializer};
937    use packet_formats::ip::{FragmentOffset, IpProto, Ipv6Proto};
938    use packet_formats::ipv4::Ipv4PacketBuilder;
939    use packet_formats::ipv6::{Ipv6PacketBuilder, Ipv6PacketBuilderWithFragmentHeader};
940    use test_case::test_case;
941
942    use super::*;
943
    /// Fake core-context state holding the fragment cache under test.
    struct FakeFragmentContext<I: ReassemblyIpExt, BT: FragmentBindingsTypes> {
        // The cache exercised by these tests, exposed via `FragmentContext`.
        cache: IpPacketFragmentCache<I, BT>,
    }
947
948    impl<I: ReassemblyIpExt, BC: FragmentBindingsContext> FakeFragmentContext<I, BC>
949    where
950        BC::DispatchId: From<FragmentTimerId<I>>,
951    {
952        fn new(bindings_ctx: &mut BC) -> Self {
953            Self { cache: IpPacketFragmentCache::new::<IntoCoreTimerCtx>(bindings_ctx) }
954        }
955    }
956
    /// Combined core + bindings context pair used by the tests below.
    type FakeCtxImpl<I> = CtxPair<FakeCoreCtxImpl<I>, FakeBindingsCtxImpl<I>>;
    /// Fake bindings context parameterized over the fragment timer ID.
    type FakeBindingsCtxImpl<I> = FakeBindingsCtx<FragmentTimerId<I>, (), (), ()>;
    /// Fake core context whose state is a `FakeFragmentContext`.
    type FakeCoreCtxImpl<I> = FakeCoreCtx<FakeFragmentContext<I, FakeBindingsCtxImpl<I>>, (), ()>;
960
    impl<I: ReassemblyIpExt> FragmentContext<I, FakeBindingsCtxImpl<I>> for FakeCoreCtxImpl<I> {
        // Exposes the fake context's fragment cache to the `FragmentHandler`
        // machinery under test.
        fn with_state_mut<
            O,
            F: FnOnce(&mut IpPacketFragmentCache<I, FakeBindingsCtxImpl<I>>) -> O,
        >(
            &mut self,
            cb: F,
        ) -> O {
            cb(&mut self.state.cache)
        }
    }
972
    /// The result `process_ipv4_fragment` or `process_ipv6_fragment` should
    /// expect after processing a fragment.
    #[derive(PartialEq)]
    enum ExpectedResult<I: ReassemblyIpExt> {
        /// After processing a packet fragment, we should be ready to reassemble
        /// the packet.
        ///
        /// `body_fragment_blocks` is in units of `FRAGMENT_BLOCK_SIZE`.
        Ready { body_fragment_blocks: u16, key: FragmentCacheKey<I> },

        /// After processing a packet fragment, we need more packet fragments
        /// before being ready to reassemble the packet.
        NeedMore,

        /// The packet fragment is invalid.
        Invalid,

        /// The cache is full.
        OutOfMemory,
    }
993
994    /// Get an IPv4 packet builder.
995    fn get_ipv4_builder() -> Ipv4PacketBuilder {
996        Ipv4PacketBuilder::new(
997            TEST_ADDRS_V4.remote_ip,
998            TEST_ADDRS_V4.local_ip,
999            10,
1000            <Ipv4 as TestIpExt>::PROTOCOL,
1001        )
1002    }
1003
1004    /// Get an IPv6 packet builder.
1005    fn get_ipv6_builder() -> Ipv6PacketBuilder {
1006        Ipv6PacketBuilder::new(
1007            TEST_ADDRS_V6.remote_ip,
1008            TEST_ADDRS_V6.local_ip,
1009            10,
1010            <Ipv6 as TestIpExt>::PROTOCOL,
1011        )
1012    }
1013
1014    /// Validate that IpPacketFragmentCache has correct size.
1015    fn validate_size<I: ReassemblyIpExt, BT: FragmentBindingsTypes>(
1016        cache: &IpPacketFragmentCache<I, BT>,
1017    ) {
1018        let mut sz: usize = 0;
1019
1020        for v in cache.cache.values() {
1021            sz += v.total_size;
1022        }
1023
1024        assert_eq!(sz, cache.size);
1025    }
1026
    /// A description of a single fragment packet to generate in tests.
    struct FragmentSpec {
        /// The ID of the fragment.
        id: u16,
        /// The offset of the fragment, in units of `FRAGMENT_BLOCK_SIZE`.
        offset: u16,
        /// The size of the fragment, in units of `FRAGMENT_BLOCK_SIZE`.
        size: u16,
        /// The value of the M flag. "True" indicates more fragments.
        m_flag: bool,
    }
1037
1038    fn expected_packet_size<I: TestIpExt>(num_fragment_blocks: u16) -> usize {
1039        usize::from(num_fragment_blocks) * usize::from(FRAGMENT_BLOCK_SIZE) + I::HEADER_LENGTH
1040    }
1041
    /// Generates and processes an IPv4 fragment packet.
    ///
    /// Builds a fragment described by the `FragmentSpec` on top of `builder`,
    /// runs it through `FragmentHandler::process_fragment`, and asserts that
    /// the outcome matches `expected_result`.
    fn process_ipv4_fragment<CC: FragmentContext<Ipv4, BC>, BC: FragmentBindingsContext>(
        core_ctx: &mut CC,
        bindings_ctx: &mut BC,
        FragmentSpec { id, offset, size, m_flag }: FragmentSpec,
        mut builder: Ipv4PacketBuilder,
        expected_result: ExpectedResult<Ipv4>,
    ) {
        // Stamp the fragmentation-related fields onto the header builder.
        builder.id(id);
        builder.fragment_offset(FragmentOffset::new(offset).unwrap());
        builder.mf_flag(m_flag);
        // The body is derived deterministically from `id` and `offset` so
        // the reassembled bytes can be validated later.
        let body = generate_body_fragment(
            id,
            offset,
            usize::from(size) * usize::from(FRAGMENT_BLOCK_SIZE),
        );

        let mut buffer = builder.wrap_body(Buf::new(body, ..)).serialize_vec_outer().unwrap();
        let packet = buffer.parse::<Ipv4Packet<_>>().unwrap();

        let actual_result =
            FragmentHandler::process_fragment::<&[u8]>(core_ctx, bindings_ctx, packet);
        // Compare the processing outcome against the caller's expectation.
        match expected_result {
            ExpectedResult::Ready { body_fragment_blocks, key: expected_key } => {
                let (key, packet_len) = assert_matches!(
                    actual_result,
                    FragmentProcessingState::Ready {key, packet_len} => (key, packet_len)
                );
                assert_eq!(key, expected_key);
                assert_eq!(packet_len, expected_packet_size::<Ipv4>(body_fragment_blocks));
            }
            ExpectedResult::NeedMore => {
                assert_matches!(actual_result, FragmentProcessingState::NeedMoreFragments);
            }
            ExpectedResult::Invalid => {
                assert_matches!(actual_result, FragmentProcessingState::InvalidFragment);
            }
            ExpectedResult::OutOfMemory => {
                assert_matches!(actual_result, FragmentProcessingState::OutOfMemory);
            }
        }
    }
1084
    /// Generates and processes an IPv6 fragment packet.
    ///
    /// `fragment_offset` and `size` are both in units of `FRAGMENT_BLOCK_SIZE`.
    ///
    /// Builds a fragment described by the `FragmentSpec` on top of `builder`,
    /// runs it through `FragmentHandler::process_fragment`, and asserts that
    /// the outcome matches `expected_result`.
    fn process_ipv6_fragment<CC: FragmentContext<Ipv6, BC>, BC: FragmentBindingsContext>(
        core_ctx: &mut CC,
        bindings_ctx: &mut BC,
        FragmentSpec { id, offset, size, m_flag }: FragmentSpec,
        builder: Ipv6PacketBuilder,
        expected_result: ExpectedResult<Ipv6>,
    ) {
        // IPv6 carries fragmentation data in a fragment extension header
        // rather than in the fixed header, so wrap the builder accordingly.
        let builder = Ipv6PacketBuilderWithFragmentHeader::new(
            builder,
            FragmentOffset::new(offset).unwrap(),
            m_flag,
            id.into(),
        );

        // The body is derived deterministically from `id` and `offset` so
        // the reassembled bytes can be validated later.
        let body = generate_body_fragment(
            id,
            offset,
            usize::from(size) * usize::from(FRAGMENT_BLOCK_SIZE),
        );

        let mut buffer = builder.wrap_body(Buf::new(body, ..)).serialize_vec_outer().unwrap();
        let packet = buffer.parse::<Ipv6Packet<_>>().unwrap();

        let actual_result =
            FragmentHandler::process_fragment::<&[u8]>(core_ctx, bindings_ctx, packet);
        // Compare the processing outcome against the caller's expectation.
        match expected_result {
            ExpectedResult::Ready { body_fragment_blocks, key: expected_key } => {
                let (key, packet_len) = assert_matches!(
                    actual_result,
                    FragmentProcessingState::Ready {key, packet_len} => (key, packet_len)
                );
                assert_eq!(key, expected_key);
                assert_eq!(packet_len, expected_packet_size::<Ipv6>(body_fragment_blocks));
            }
            ExpectedResult::NeedMore => {
                assert_matches!(actual_result, FragmentProcessingState::NeedMoreFragments);
            }
            ExpectedResult::Invalid => {
                assert_matches!(actual_result, FragmentProcessingState::InvalidFragment);
            }
            ExpectedResult::OutOfMemory => {
                assert_matches!(actual_result, FragmentProcessingState::OutOfMemory);
            }
        }
    }
1133
    /// IP-version-specific helpers for driving fragment processing in tests.
    trait TestIpExt: IpExt + netstack3_base::testutil::TestIpExt + ReassemblyIpExt {
        /// The length of this IP version's fixed header, in bytes.
        const HEADER_LENGTH: usize;

        /// The transport protocol stamped on generated test packets.
        const PROTOCOL: Self::Proto;

        /// Generates a fragment described by `spec`, processes it, and
        /// asserts that the outcome matches `expected_result`.
        fn process_ip_fragment<CC: FragmentContext<Self, BC>, BC: FragmentBindingsContext>(
            core_ctx: &mut CC,
            bindings_ctx: &mut BC,
            spec: FragmentSpec,
            expected_result: ExpectedResult<Self>,
        );
    }
1146
    impl TestIpExt for Ipv4 {
        // The fixed (option-less) IPv4 header prefix length.
        const HEADER_LENGTH: usize = packet_formats::ipv4::HDR_PREFIX_LEN;

        const PROTOCOL: Ipv4Proto = Ipv4Proto::Proto(IpProto::Tcp);

        fn process_ip_fragment<CC: FragmentContext<Self, BC>, BC: FragmentBindingsContext>(
            core_ctx: &mut CC,
            bindings_ctx: &mut BC,
            spec: FragmentSpec,
            expected_result: ExpectedResult<Ipv4>,
        ) {
            // Delegate to the IPv4 helper using the default test builder.
            process_ipv4_fragment(core_ctx, bindings_ctx, spec, get_ipv4_builder(), expected_result)
        }
    }
    impl TestIpExt for Ipv6 {
        // The fixed IPv6 header length (extension headers not included).
        const HEADER_LENGTH: usize = packet_formats::ipv6::IPV6_FIXED_HDR_LEN;

        const PROTOCOL: Ipv6Proto = Ipv6Proto::Proto(IpProto::Tcp);

        fn process_ip_fragment<CC: FragmentContext<Self, BC>, BC: FragmentBindingsContext>(
            core_ctx: &mut CC,
            bindings_ctx: &mut BC,
            spec: FragmentSpec,
            expected_result: ExpectedResult<Ipv6>,
        ) {
            // Delegate to the IPv6 helper using the default test builder.
            process_ipv6_fragment(core_ctx, bindings_ctx, spec, get_ipv6_builder(), expected_result)
        }
    }
1175
1176    /// Tries to reassemble the packet with the given fragment ID.
1177    ///
1178    /// `body_fragment_blocks` is in units of `FRAGMENT_BLOCK_SIZE`.
1179    fn try_reassemble_ip_packet<
1180        I: TestIpExt + netstack3_base::IpExt,
1181        CC: FragmentContext<I, BC>,
1182        BC: FragmentBindingsContext,
1183    >(
1184        core_ctx: &mut CC,
1185        bindings_ctx: &mut BC,
1186        fragment_id: u16,
1187        body_fragment_blocks: u16,
1188    ) {
1189        let mut buffer: Vec<u8> = vec![
1190            0;
1191            usize::from(body_fragment_blocks)
1192                * usize::from(FRAGMENT_BLOCK_SIZE)
1193                + I::HEADER_LENGTH
1194        ];
1195        let mut buffer = &mut buffer[..];
1196        let key = test_key(fragment_id);
1197
1198        FragmentHandler::reassemble_packet(core_ctx, bindings_ctx, &key, &mut buffer).unwrap();
1199        let packet = I::Packet::parse_mut(&mut buffer, ()).unwrap();
1200
1201        let expected_body = generate_body_fragment(
1202            fragment_id,
1203            0,
1204            usize::from(body_fragment_blocks) * usize::from(FRAGMENT_BLOCK_SIZE),
1205        );
1206        assert_eq!(packet.body(), &expected_body[..]);
1207    }
1208
1209    /// Generates the body of a packet with the given fragment ID, offset, and
1210    /// length.
1211    ///
1212    /// Overlapping body bytes from different calls to `generate_body_fragment`
1213    /// are guaranteed to have the same values.
1214    fn generate_body_fragment(fragment_id: u16, fragment_offset: u16, len: usize) -> Vec<u8> {
1215        // The body contains increasing byte values which start at `fragment_id`
1216        // at byte 0. This ensures that different packets with different
1217        // fragment IDs contain bodies with different byte values.
1218        let start = usize::from(fragment_id)
1219            + usize::from(fragment_offset) * usize::from(FRAGMENT_BLOCK_SIZE);
1220        (start..start + len).map(|byte| byte as u8).collect()
1221    }
1222
    /// Gets a `FragmentCacheKey` with hard coded test values.
    fn test_key<I: TestIpExt>(id: u16) -> FragmentCacheKey<I> {
        // Wrapper so `map_ip_out` can produce the IP-version-specific key
        // component generically.
        #[derive(GenericOverIp)]
        #[generic_over_ip(I, Ip)]
        struct Wrapper<I: ReassemblyIpExt>(I::FragmentCacheKeyPart);

        // IPv4 keys carry the transport protocol; IPv6 keys have no extra
        // component.
        let Wrapper(ip_specific_fields) =
            I::map_ip_out((), |()| Wrapper(Ipv4::PROTOCOL), |()| Wrapper(()));

        FragmentCacheKey {
            src_ip: I::TEST_ADDRS.remote_ip.get(),
            dst_ip: I::TEST_ADDRS.local_ip.get(),
            fragment_id: id.into(),
            ip_specific_fields,
        }
    }
1239
1240    fn new_context<I: ReassemblyIpExt>() -> FakeCtxImpl<I> {
1241        FakeCtxImpl::<I>::with_default_bindings_ctx(|bindings_ctx| {
1242            FakeCoreCtxImpl::with_state(FakeFragmentContext::new(bindings_ctx))
1243        })
1244    }
1245
1246    #[test]
1247    fn test_ipv4_reassembly_not_needed() {
1248        let FakeCtxImpl { mut core_ctx, mut bindings_ctx } = new_context::<Ipv4>();
1249
1250        // Test that we don't attempt reassembly if the packet is not
1251        // fragmented.
1252
1253        let builder = get_ipv4_builder();
1254        let body = [1, 2, 3, 4, 5];
1255        let mut buffer =
1256            builder.wrap_body(Buf::new(body.to_vec(), ..)).serialize_vec_outer().unwrap();
1257        let packet = buffer.parse::<Ipv4Packet<_>>().unwrap();
1258        assert_matches!(
1259            FragmentHandler::process_fragment::<&[u8]>(&mut core_ctx, &mut bindings_ctx, packet),
1260            FragmentProcessingState::NotNeeded(unfragmented) if unfragmented.body() == body
1261        );
1262    }
1263
    #[test]
    #[should_panic(
        expected = "internal error: entered unreachable code: Should never call this function if the packet does not have a fragment header"
    )]
    fn test_ipv6_reassembly_not_needed() {
        let FakeCtxImpl { mut core_ctx, mut bindings_ctx } = new_context::<Ipv6>();

        // Test that we panic if we call `fragment_data` on a packet that has no
        // fragment data.
        //
        // NOTE: per the `should_panic` message above, the panic is expected to
        // fire inside `process_fragment`, so the assertion below should never
        // actually be reached.

        let builder = get_ipv6_builder();
        let mut buffer =
            builder.wrap_body(Buf::new(vec![1, 2, 3, 4, 5], ..)).serialize_vec_outer().unwrap();
        let packet = buffer.parse::<Ipv6Packet<_>>().unwrap();
        assert_matches!(
            FragmentHandler::process_fragment::<&[u8]>(&mut core_ctx, &mut bindings_ctx, packet),
            FragmentProcessingState::InvalidFragment
        );
    }
1283
1284    #[ip_test(I)]
1285    #[test_case(1)]
1286    #[test_case(10)]
1287    #[test_case(100)]
1288    fn test_ip_reassembly<I: TestIpExt>(size: u16) {
1289        let FakeCtxImpl { mut core_ctx, mut bindings_ctx } = new_context::<I>();
1290        let id = 5;
1291
1292        // Test that we properly reassemble fragmented packets.
1293
1294        // Process fragment #0
1295        I::process_ip_fragment(
1296            &mut core_ctx,
1297            &mut bindings_ctx,
1298            FragmentSpec { id, offset: 0, size, m_flag: true },
1299            ExpectedResult::NeedMore,
1300        );
1301
1302        // Process fragment #1
1303        I::process_ip_fragment(
1304            &mut core_ctx,
1305            &mut bindings_ctx,
1306            FragmentSpec { id, offset: size, size, m_flag: true },
1307            ExpectedResult::NeedMore,
1308        );
1309
1310        // Process fragment #2
1311        I::process_ip_fragment(
1312            &mut core_ctx,
1313            &mut bindings_ctx,
1314            FragmentSpec { id, offset: 2 * size, size, m_flag: false },
1315            ExpectedResult::Ready { body_fragment_blocks: 3 * size, key: test_key(id) },
1316        );
1317
1318        try_reassemble_ip_packet(&mut core_ctx, &mut bindings_ctx, id, 3 * size);
1319    }
1320
    #[test]
    fn test_ipv4_key_uniqueness() {
        let FakeCtxImpl { mut core_ctx, mut bindings_ctx } = new_context::<Ipv4>();

        // IPv4 reassembly is keyed on (source, destination, ID, protocol); a
        // mismatch in any one component must not complete the packet below.
        const RIGHT_SRC: Ipv4Addr = net_ip_v4!("192.0.2.1");
        const WRONG_SRC: Ipv4Addr = net_ip_v4!("192.0.2.2");

        const RIGHT_DST: Ipv4Addr = net_ip_v4!("192.0.2.3");
        const WRONG_DST: Ipv4Addr = net_ip_v4!("192.0.2.4");

        const RIGHT_PROTO: Ipv4Proto = Ipv4Proto::Proto(IpProto::Tcp);
        const WRONG_PROTO: Ipv4Proto = Ipv4Proto::Proto(IpProto::Udp);

        const RIGHT_ID: u16 = 1;
        const WRONG_ID: u16 = 2;

        const TTL: u8 = 1;

        // Process fragment #0.
        process_ipv4_fragment(
            &mut core_ctx,
            &mut bindings_ctx,
            FragmentSpec { id: RIGHT_ID, offset: 0, size: 1, m_flag: true },
            Ipv4PacketBuilder::new(RIGHT_SRC, RIGHT_DST, TTL, RIGHT_PROTO),
            ExpectedResult::NeedMore,
        );

        // Process fragment #1 under a different key, and verify it doesn't
        // complete the packet.
        for (id, src, dst, proto) in [
            (RIGHT_ID, RIGHT_SRC, RIGHT_DST, WRONG_PROTO),
            (RIGHT_ID, RIGHT_SRC, WRONG_DST, RIGHT_PROTO),
            (RIGHT_ID, WRONG_SRC, RIGHT_DST, RIGHT_PROTO),
            (WRONG_ID, RIGHT_SRC, RIGHT_DST, RIGHT_PROTO),
        ] {
            process_ipv4_fragment(
                &mut core_ctx,
                &mut bindings_ctx,
                FragmentSpec { id, offset: 1, size: 1, m_flag: false },
                Ipv4PacketBuilder::new(src, dst, TTL, proto),
                ExpectedResult::NeedMore,
            );
        }

        // Finally, process fragment #1 under the correct key, and verify the
        // packet is completed.
        const KEY: FragmentCacheKey<Ipv4> = FragmentCacheKey {
            src_ip: RIGHT_SRC,
            dst_ip: RIGHT_DST,
            fragment_id: RIGHT_ID as u32,
            ip_specific_fields: RIGHT_PROTO,
        };
        process_ipv4_fragment(
            &mut core_ctx,
            &mut bindings_ctx,
            FragmentSpec { id: RIGHT_ID, offset: 1, size: 1, m_flag: false },
            Ipv4PacketBuilder::new(RIGHT_SRC, RIGHT_DST, TTL, RIGHT_PROTO),
            ExpectedResult::Ready { body_fragment_blocks: 2, key: KEY },
        );
        // Reassembly under the right key must succeed and parse cleanly.
        let mut buffer: Vec<u8> = vec![0; expected_packet_size::<Ipv4>(2)];
        let mut buffer = &mut buffer[..];
        FragmentHandler::reassemble_packet(&mut core_ctx, &mut bindings_ctx, &KEY, &mut buffer)
            .expect("reassembly should succeed");
        let _packet = Ipv4Packet::parse_mut(&mut buffer, ()).expect("parse should succeed");
    }
1386
    #[test]
    fn test_ipv6_key_uniqueness() {
        let FakeCtxImpl { mut core_ctx, mut bindings_ctx } = new_context::<Ipv6>();

        // IPv6 reassembly is keyed on (source, destination, ID); a mismatch
        // in any one component must not complete the packet below. Unlike
        // IPv4, the transport protocol is not part of the key (see
        // `test_ipv6_reassemble_different_protocols`).
        const RIGHT_SRC: Ipv6Addr = net_ip_v6!("2001:0db8::1");
        const WRONG_SRC: Ipv6Addr = net_ip_v6!("2001:0db8::2");

        const RIGHT_DST: Ipv6Addr = net_ip_v6!("2001:0db8::3");
        const WRONG_DST: Ipv6Addr = net_ip_v6!("2001:0db8::4");

        const RIGHT_ID: u16 = 1;
        const WRONG_ID: u16 = 2;

        const TTL: u8 = 1;

        // Process fragment #0.
        process_ipv6_fragment(
            &mut core_ctx,
            &mut bindings_ctx,
            FragmentSpec { id: RIGHT_ID, offset: 0, size: 1, m_flag: true },
            Ipv6PacketBuilder::new(RIGHT_SRC, RIGHT_DST, TTL, Ipv6::PROTOCOL),
            ExpectedResult::NeedMore,
        );

        // Process fragment #1 under a different key, and verify it doesn't
        // complete the packet.
        for (id, src, dst) in [
            (RIGHT_ID, RIGHT_SRC, WRONG_DST),
            (RIGHT_ID, WRONG_SRC, RIGHT_DST),
            (WRONG_ID, RIGHT_SRC, RIGHT_DST),
        ] {
            process_ipv6_fragment(
                &mut core_ctx,
                &mut bindings_ctx,
                FragmentSpec { id, offset: 1, size: 1, m_flag: false },
                Ipv6PacketBuilder::new(src, dst, TTL, Ipv6::PROTOCOL),
                ExpectedResult::NeedMore,
            );
        }

        // Finally, process fragment #1 under the correct key, and verify the
        // packet is completed.
        const KEY: FragmentCacheKey<Ipv6> = FragmentCacheKey {
            src_ip: RIGHT_SRC,
            dst_ip: RIGHT_DST,
            fragment_id: RIGHT_ID as u32,
            ip_specific_fields: (),
        };
        process_ipv6_fragment(
            &mut core_ctx,
            &mut bindings_ctx,
            FragmentSpec { id: RIGHT_ID, offset: 1, size: 1, m_flag: false },
            Ipv6PacketBuilder::new(RIGHT_SRC, RIGHT_DST, TTL, Ipv6::PROTOCOL),
            ExpectedResult::Ready { body_fragment_blocks: 2, key: KEY },
        );
        // Reassembly under the right key must succeed and parse cleanly.
        let mut buffer: Vec<u8> = vec![0; expected_packet_size::<Ipv6>(2)];
        let mut buffer = &mut buffer[..];
        FragmentHandler::reassemble_packet(&mut core_ctx, &mut bindings_ctx, &KEY, &mut buffer)
            .expect("reassembly should succeed");
        let _packet = Ipv6Packet::parse_mut(&mut buffer, ()).expect("parse should succeed");
    }
1448
    #[test]
    fn test_ipv6_reassemble_different_protocols() {
        let FakeCtxImpl { mut core_ctx, mut bindings_ctx } = new_context::<Ipv6>();

        // IPv6 fragment cache keys do not include the transport protocol, so
        // fragments of one packet may disagree about it; this test verifies
        // that the first fragment's protocol wins.
        const SRC: Ipv6Addr = net_ip_v6!("2001:0db8::1");
        const DST: Ipv6Addr = net_ip_v6!("2001:0db8::2");
        const ID: u16 = 1;
        const TTL: u8 = 1;

        const PROTO1: Ipv6Proto = Ipv6Proto::Proto(IpProto::Tcp);
        const PROTO2: Ipv6Proto = Ipv6Proto::Proto(IpProto::Udp);

        // Process fragment #0 (uses `PROTO1`).
        process_ipv6_fragment(
            &mut core_ctx,
            &mut bindings_ctx,
            FragmentSpec { id: ID, offset: 0, size: 1, m_flag: true },
            Ipv6PacketBuilder::new(SRC, DST, TTL, PROTO1),
            ExpectedResult::NeedMore,
        );

        // Process fragment #1 (uses `PROTO2`).
        // The packet should successfully reassemble, using the protocol from
        // fragment #0 (i.e. `PROTO1`).
        const KEY: FragmentCacheKey<Ipv6> = FragmentCacheKey {
            src_ip: SRC,
            dst_ip: DST,
            fragment_id: ID as u32,
            ip_specific_fields: (),
        };
        process_ipv6_fragment(
            &mut core_ctx,
            &mut bindings_ctx,
            FragmentSpec { id: ID, offset: 1, size: 1, m_flag: false },
            Ipv6PacketBuilder::new(SRC, DST, TTL, PROTO2),
            ExpectedResult::Ready { body_fragment_blocks: 2, key: KEY },
        );
        let mut buffer: Vec<u8> = vec![0; expected_packet_size::<Ipv6>(2)];
        let mut buffer = &mut buffer[..];
        FragmentHandler::reassemble_packet(&mut core_ctx, &mut bindings_ctx, &KEY, &mut buffer)
            .expect("reassembly should succeed");
        let packet = Ipv6Packet::parse_mut(&mut buffer, ()).expect("parse should succeed");
        // The reassembled packet carries the protocol from fragment #0.
        assert_eq!(packet.proto(), PROTO1);
    }
1493
1494    #[ip_test(I)]
1495    #[test_case(1)]
1496    #[test_case(10)]
1497    #[test_case(100)]
1498    fn test_ip_reassemble_with_missing_blocks<I: TestIpExt>(size: u16) {
1499        let FakeCtxImpl { mut core_ctx, mut bindings_ctx } = new_context::<I>();
1500        let id = 5;
1501
1502        // Test the error we get when we attempt to reassemble with missing
1503        // fragments.
1504
1505        // Process fragment #0
1506        I::process_ip_fragment(
1507            &mut core_ctx,
1508            &mut bindings_ctx,
1509            FragmentSpec { id, offset: 0, size, m_flag: true },
1510            ExpectedResult::NeedMore,
1511        );
1512
1513        // Process fragment #2
1514        I::process_ip_fragment(
1515            &mut core_ctx,
1516            &mut bindings_ctx,
1517            FragmentSpec { id, offset: size, size, m_flag: true },
1518            ExpectedResult::NeedMore,
1519        );
1520
1521        let mut buffer: Vec<u8> = vec![0; 1];
1522        let mut buffer = &mut buffer[..];
1523        let key = test_key(id);
1524        assert_eq!(
1525            FragmentHandler::reassemble_packet(&mut core_ctx, &mut bindings_ctx, &key, &mut buffer)
1526                .unwrap_err(),
1527            FragmentReassemblyError::MissingFragments,
1528        );
1529    }
1530
    #[ip_test(I)]
    fn test_ip_reassemble_after_timer<I: TestIpExt>() {
        // Tests that the reassembly timer purges a packet's cached fragments:
        // even a fully-received packet can no longer be reassembled once its
        // timer fires.
        let FakeCtxImpl { mut core_ctx, mut bindings_ctx } = new_context::<I>();
        let id = 5;
        let key = test_key::<I>(id);

        // Make sure no timers in the dispatcher yet.
        bindings_ctx.timers.assert_no_timers_installed();
        assert_eq!(core_ctx.state.cache.size, 0);

        // Test that we properly reset fragment cache on timer.

        // Process fragment #0; this schedules the reassembly timeout.
        I::process_ip_fragment(
            &mut core_ctx,
            &mut bindings_ctx,
            FragmentSpec { id, offset: 0, size: 1, m_flag: true },
            ExpectedResult::NeedMore,
        );

        // Make sure a timer got added.
        core_ctx.state.cache.timers.assert_timers([(
            key,
            (),
            FakeInstant::from(I::REASSEMBLY_TIMEOUT),
        )]);
        validate_size(&core_ctx.state.cache);

        // Process fragment #1
        I::process_ip_fragment(
            &mut core_ctx,
            &mut bindings_ctx,
            FragmentSpec { id, offset: 1, size: 1, m_flag: true },
            ExpectedResult::NeedMore,
        );
        // Make sure no new timers got added or fired.
        core_ctx.state.cache.timers.assert_timers([(
            key,
            (),
            FakeInstant::from(I::REASSEMBLY_TIMEOUT),
        )]);
        validate_size(&core_ctx.state.cache);

        // Process fragment #2 (the final fragment; the cache now holds the
        // complete packet).
        I::process_ip_fragment(
            &mut core_ctx,
            &mut bindings_ctx,
            FragmentSpec { id, offset: 2, size: 1, m_flag: false },
            ExpectedResult::Ready { body_fragment_blocks: 3, key: test_key(id) },
        );
        // Make sure no new timers got added or fired.
        core_ctx.state.cache.timers.assert_timers([(
            key,
            (),
            FakeInstant::from(I::REASSEMBLY_TIMEOUT),
        )]);
        validate_size(&core_ctx.state.cache);

        // Trigger the timer (simulate a timer for the fragmented packet).
        assert_eq!(
            bindings_ctx.trigger_next_timer(&mut core_ctx),
            Some(FragmentTimerId::<I>::default())
        );

        // Make sure no other timers exist and the cache was emptied.
        bindings_ctx.timers.assert_no_timers_installed();
        assert_eq!(core_ctx.state.cache.size, 0);

        // Attempt to reassemble the packet but get an error since the fragment
        // data would have been reset/cleared.
        let key = test_key(id);
        let packet_len = 44;
        let mut buffer: Vec<u8> = vec![0; packet_len];
        let mut buffer = &mut buffer[..];
        assert_eq!(
            FragmentHandler::reassemble_packet(&mut core_ctx, &mut bindings_ctx, &key, &mut buffer)
                .unwrap_err(),
            FragmentReassemblyError::InvalidKey,
        );
    }
1611
1612    #[ip_test(I)]
1613    #[test_case(1)]
1614    #[test_case(10)]
1615    #[test_case(100)]
1616    fn test_ip_fragment_cache_oom<I: TestIpExt>(size: u16) {
1617        let FakeCtxImpl { mut core_ctx, mut bindings_ctx } = new_context::<I>();
1618        let mut id = 0;
1619        const THRESHOLD: usize = 8196usize;
1620
1621        assert_eq!(core_ctx.state.cache.size, 0);
1622        core_ctx.state.cache.threshold = THRESHOLD;
1623
1624        // Test that when cache size exceeds the threshold, process_fragment
1625        // returns OOM.
1626        while core_ctx.state.cache.size + usize::from(size) <= THRESHOLD {
1627            I::process_ip_fragment(
1628                &mut core_ctx,
1629                &mut bindings_ctx,
1630                FragmentSpec { id, offset: 0, size, m_flag: true },
1631                ExpectedResult::NeedMore,
1632            );
1633            validate_size(&core_ctx.state.cache);
1634            id += 1;
1635        }
1636
1637        // Now that the cache is at or above the threshold, observe OOM.
1638        I::process_ip_fragment(
1639            &mut core_ctx,
1640            &mut bindings_ctx,
1641            FragmentSpec { id, offset: 0, size, m_flag: true },
1642            ExpectedResult::OutOfMemory,
1643        );
1644        validate_size(&core_ctx.state.cache);
1645
1646        // Trigger the timers, which will clear the cache.
1647        let _timers = bindings_ctx
1648            .trigger_timers_for(I::REASSEMBLY_TIMEOUT + Duration::from_secs(1), &mut core_ctx);
1649        assert_eq!(core_ctx.state.cache.size, 0);
1650        validate_size(&core_ctx.state.cache);
1651
1652        // Can process fragments again.
1653        I::process_ip_fragment(
1654            &mut core_ctx,
1655            &mut bindings_ctx,
1656            FragmentSpec { id, offset: 0, size, m_flag: true },
1657            ExpectedResult::NeedMore,
1658        );
1659    }
1660
1661    #[ip_test(I)]
1662    #[test_case(1)]
1663    #[test_case(10)]
1664    #[test_case(100)]
1665    fn test_unordered_fragments<I: TestIpExt>(size: u16) {
1666        let FakeCtxImpl { mut core_ctx, mut bindings_ctx } = new_context::<I>();
1667        let id = 5;
1668
1669        // Process fragment #0
1670        I::process_ip_fragment(
1671            &mut core_ctx,
1672            &mut bindings_ctx,
1673            FragmentSpec { id, offset: 0, size, m_flag: true },
1674            ExpectedResult::NeedMore,
1675        );
1676
1677        // Process fragment #2
1678        I::process_ip_fragment(
1679            &mut core_ctx,
1680            &mut bindings_ctx,
1681            FragmentSpec { id, offset: 2 * size, size, m_flag: false },
1682            ExpectedResult::NeedMore,
1683        );
1684
1685        // Process fragment #1
1686        I::process_ip_fragment(
1687            &mut core_ctx,
1688            &mut bindings_ctx,
1689            FragmentSpec { id, offset: size, size, m_flag: true },
1690            ExpectedResult::Ready { body_fragment_blocks: 3 * size, key: test_key(id) },
1691        );
1692    }
1693
1694    #[ip_test(I)]
1695    #[test_case(1)]
1696    #[test_case(10)]
1697    #[test_case(100)]
1698    fn test_ip_duplicate_fragment<I: TestIpExt>(size: u16) {
1699        let FakeCtxImpl { mut core_ctx, mut bindings_ctx } = new_context::<I>();
1700        let id = 5;
1701
1702        // Process fragment #0
1703        I::process_ip_fragment(
1704            &mut core_ctx,
1705            &mut bindings_ctx,
1706            FragmentSpec { id, offset: 0, size, m_flag: true },
1707            ExpectedResult::NeedMore,
1708        );
1709
1710        // Process the exact same fragment over again. It should be ignored.
1711        I::process_ip_fragment(
1712            &mut core_ctx,
1713            &mut bindings_ctx,
1714            FragmentSpec { id, offset: 0, size, m_flag: true },
1715            ExpectedResult::NeedMore,
1716        );
1717
1718        // Verify that the fragment's cache is intact by sending the remaining
1719        // fragment.
1720        I::process_ip_fragment(
1721            &mut core_ctx,
1722            &mut bindings_ctx,
1723            FragmentSpec { id, offset: size, size, m_flag: false },
1724            ExpectedResult::Ready { body_fragment_blocks: 2 * size, key: test_key(id) },
1725        );
1726
1727        try_reassemble_ip_packet(&mut core_ctx, &mut bindings_ctx, id, 2 * size);
1728    }
1729
1730    #[ip_test(I)]
1731    #[test_case(1)]
1732    #[test_case(10)]
1733    #[test_case(100)]
1734    fn test_ip_out_of_bounds_fragment<I: TestIpExt>(size: u16) {
1735        let FakeCtxImpl { mut core_ctx, mut bindings_ctx } = new_context::<I>();
1736        let id = 5;
1737
1738        // Process fragment #1
1739        I::process_ip_fragment(
1740            &mut core_ctx,
1741            &mut bindings_ctx,
1742            FragmentSpec { id, offset: size, size, m_flag: false },
1743            ExpectedResult::NeedMore,
1744        );
1745
1746        // Process a fragment after fragment #1. It should be deemed invalid
1747        // because fragment #1 was the end.
1748        I::process_ip_fragment(
1749            &mut core_ctx,
1750            &mut bindings_ctx,
1751            FragmentSpec { id, offset: 2 * size, size, m_flag: false },
1752            ExpectedResult::Invalid,
1753        );
1754    }
1755
1756    #[ip_test(I)]
1757    #[test_case(50, 100; "overlaps_front")]
1758    #[test_case(150, 100; "overlaps_back")]
1759    #[test_case(50, 200; "overlaps_both")]
1760    fn test_ip_overlapping_fragment<I: TestIpExt>(offset: u16, size: u16) {
1761        let FakeCtxImpl { mut core_ctx, mut bindings_ctx } = new_context::<I>();
1762        let id = 5;
1763
1764        // Process fragment #0
1765        I::process_ip_fragment(
1766            &mut core_ctx,
1767            &mut bindings_ctx,
1768            FragmentSpec { id, offset: 100, size: 100, m_flag: true },
1769            ExpectedResult::NeedMore,
1770        );
1771
1772        // Process a fragment that overlaps with fragment 0. It should be deemed
1773        // invalid.
1774        I::process_ip_fragment(
1775            &mut core_ctx,
1776            &mut bindings_ctx,
1777            FragmentSpec { id, offset, size, m_flag: true },
1778            ExpectedResult::Invalid,
1779        );
1780    }
1781
    #[test]
    fn test_ipv4_fragment_not_multiple_of_offset_unit() {
        // Tests that a non-final IPv4 fragment whose body is not a multiple of
        // `FRAGMENT_BLOCK_SIZE` is rejected, while the *last* fragment (more
        // fragments flag unset) may have any body size.
        let FakeCtxImpl { mut core_ctx, mut bindings_ctx } = new_context::<Ipv4>();
        let id = 0;

        assert_eq!(core_ctx.state.cache.size, 0);
        // Test that fragment bodies must be a multiple of
        // `FRAGMENT_BLOCK_SIZE`, except for the last fragment.

        // Process fragment #0
        process_ipv4_fragment(
            &mut core_ctx,
            &mut bindings_ctx,
            FragmentSpec { id, offset: 0, size: 1, m_flag: true },
            get_ipv4_builder(),
            ExpectedResult::NeedMore,
        );

        // Process fragment #1 (body size is not a multiple of
        // `FRAGMENT_BLOCK_SIZE` and more flag is `true`).
        let mut builder = get_ipv4_builder();
        builder.id(id);
        builder.fragment_offset(FragmentOffset::new(1).unwrap());
        builder.mf_flag(true);
        // Body with 1 byte less than `FRAGMENT_BLOCK_SIZE` so it is not a
        // multiple of `FRAGMENT_BLOCK_SIZE`.
        let mut body: Vec<u8> = Vec::new();
        body.extend(FRAGMENT_BLOCK_SIZE..FRAGMENT_BLOCK_SIZE * 2 - 1);
        let mut buffer = builder.wrap_body(Buf::new(body, ..)).serialize_vec_outer().unwrap();
        let packet = buffer.parse::<Ipv4Packet<_>>().unwrap();
        // A short body on a non-final fragment is invalid.
        assert_matches!(
            FragmentHandler::process_fragment::<&[u8]>(&mut core_ctx, &mut bindings_ctx, packet),
            FragmentProcessingState::InvalidFragment
        );

        // Process fragment #1 (body size is not a multiple of
        // `FRAGMENT_BLOCK_SIZE` but more flag is `false`). The last fragment is
        // allowed to not be a multiple of `FRAGMENT_BLOCK_SIZE`.
        let mut builder = get_ipv4_builder();
        builder.id(id);
        builder.fragment_offset(FragmentOffset::new(1).unwrap());
        builder.mf_flag(false);
        // Body with 1 byte less than `FRAGMENT_BLOCK_SIZE` so it is not a
        // multiple of `FRAGMENT_BLOCK_SIZE`.
        let mut body: Vec<u8> = Vec::new();
        body.extend(FRAGMENT_BLOCK_SIZE..FRAGMENT_BLOCK_SIZE * 2 - 1);
        let mut buffer = builder.wrap_body(Buf::new(body, ..)).serialize_vec_outer().unwrap();
        let packet = buffer.parse::<Ipv4Packet<_>>().unwrap();
        let (key, packet_len) = assert_matches!(
            FragmentHandler::process_fragment::<&[u8]>(&mut core_ctx, &mut bindings_ctx, packet),
            FragmentProcessingState::Ready {key, packet_len} => (key, packet_len)
        );
        assert_eq!(key, test_key(id));
        // 20-byte IPv4 header + 15 bytes of reassembled body.
        assert_eq!(packet_len, 35);
        validate_size(&core_ctx.state.cache);
        let mut buffer: Vec<u8> = vec![0; packet_len];
        let mut buffer = &mut buffer[..];
        FragmentHandler::reassemble_packet(&mut core_ctx, &mut bindings_ctx, &key, &mut buffer)
            .unwrap();
        let packet = Ipv4Packet::parse_mut(&mut buffer, ()).unwrap();
        // The reassembled body is the 15 consecutive byte values 0..15.
        let mut expected_body: Vec<u8> = Vec::new();
        expected_body.extend(0..15);
        assert_eq!(packet.body(), &expected_body[..]);
        // Successful reassembly removes the entry from the cache.
        assert_eq!(core_ctx.state.cache.size, 0);
    }
1847
    #[test]
    fn test_ipv6_fragment_not_multiple_of_offset_unit() {
        // Tests that a non-final IPv6 fragment whose body is not a multiple of
        // `FRAGMENT_BLOCK_SIZE` is rejected, while the *last* fragment (more
        // fragments flag unset) may have any body size.
        let FakeCtxImpl { mut core_ctx, mut bindings_ctx } = new_context::<Ipv6>();
        let id = 0;

        assert_eq!(core_ctx.state.cache.size, 0);
        // Test that fragment bodies must be a multiple of
        // `FRAGMENT_BLOCK_SIZE`, except for the last fragment.

        // Process fragment #0
        process_ipv6_fragment(
            &mut core_ctx,
            &mut bindings_ctx,
            FragmentSpec { id, offset: 0, size: 1, m_flag: true },
            get_ipv6_builder(),
            ExpectedResult::NeedMore,
        );

        // Process fragment #1 (body size is not a multiple of
        // `FRAGMENT_BLOCK_SIZE` and more flag is `true`).
        let offset = 1;
        // One byte short of a full fragment block.
        let body_size: usize = (FRAGMENT_BLOCK_SIZE - 1).into();
        let builder = Ipv6PacketBuilderWithFragmentHeader::new(
            get_ipv6_builder(),
            FragmentOffset::new(offset).unwrap(),
            true,
            id.into(),
        );
        let body = generate_body_fragment(id, offset, body_size);
        let mut buffer = builder.wrap_body(Buf::new(body, ..)).serialize_vec_outer().unwrap();
        let packet = buffer.parse::<Ipv6Packet<_>>().unwrap();
        // A short body on a non-final fragment is invalid.
        assert_matches!(
            FragmentHandler::process_fragment::<&[u8]>(&mut core_ctx, &mut bindings_ctx, packet),
            FragmentProcessingState::InvalidFragment
        );

        // Process fragment #1 (body size is not a multiple of
        // `FRAGMENT_BLOCK_SIZE` but more flag is `false`). The last fragment is
        // allowed to not be a multiple of `FRAGMENT_BLOCK_SIZE`.
        let builder = Ipv6PacketBuilderWithFragmentHeader::new(
            get_ipv6_builder(),
            FragmentOffset::new(offset).unwrap(),
            false,
            id.into(),
        );
        let body = generate_body_fragment(id, offset, body_size);
        let mut buffer = builder.wrap_body(Buf::new(body, ..)).serialize_vec_outer().unwrap();
        let packet = buffer.parse::<Ipv6Packet<_>>().unwrap();
        let (key, packet_len) = assert_matches!(
            FragmentHandler::process_fragment::<&[u8]>(&mut core_ctx, &mut bindings_ctx, packet),
            FragmentProcessingState::Ready {key, packet_len} => (key, packet_len)
        );
        assert_eq!(key, test_key(id));
        // 40-byte IPv6 header + 15 bytes of reassembled body.
        assert_eq!(packet_len, 55);

        validate_size(&core_ctx.state.cache);
        let mut buffer: Vec<u8> = vec![0; packet_len];
        let mut buffer = &mut buffer[..];
        FragmentHandler::reassemble_packet(&mut core_ctx, &mut bindings_ctx, &key, &mut buffer)
            .unwrap();
        let packet = Ipv6Packet::parse_mut(&mut buffer, ()).unwrap();
        // The reassembled body is the 15 consecutive byte values 0..15.
        let mut expected_body: Vec<u8> = Vec::new();
        expected_body.extend(0..15);
        assert_eq!(packet.body(), &expected_body[..]);
        // Successful reassembly removes the entry from the cache.
        assert_eq!(core_ctx.state.cache.size, 0);
    }
1914
    #[ip_test(I)]
    fn test_ip_reassembly_with_multiple_intertwined_packets<I: TestIpExt>() {
        let FakeCtxImpl { mut core_ctx, mut bindings_ctx } = new_context::<I>();
        const SIZE: u16 = 1;
        let id_0 = 5;
        let id_1 = 10;

        // Test that we properly reassemble fragmented packets when they arrive
        // intertwined with other packets' fragments.

        // Process fragment #0 for packet #0
        I::process_ip_fragment(
            &mut core_ctx,
            &mut bindings_ctx,
            FragmentSpec { id: id_0, offset: 0, size: SIZE, m_flag: true },
            ExpectedResult::NeedMore,
        );

        // Process fragment #0 for packet #1
        I::process_ip_fragment(
            &mut core_ctx,
            &mut bindings_ctx,
            FragmentSpec { id: id_1, offset: 0, size: SIZE, m_flag: true },
            ExpectedResult::NeedMore,
        );

        // Process fragment #1 for packet #0
        I::process_ip_fragment(
            &mut core_ctx,
            &mut bindings_ctx,
            FragmentSpec { id: id_0, offset: 1, size: SIZE, m_flag: true },
            ExpectedResult::NeedMore,
        );

        // Process fragment #1 for packet #1
        I::process_ip_fragment(
            &mut core_ctx,
            &mut bindings_ctx,
            FragmentSpec { id: id_1, offset: 1, size: SIZE, m_flag: true },
            ExpectedResult::NeedMore,
        );

        // Process fragment #2 (the final fragment) for packet #0
        I::process_ip_fragment(
            &mut core_ctx,
            &mut bindings_ctx,
            FragmentSpec { id: id_0, offset: 2, size: SIZE, m_flag: false },
            ExpectedResult::Ready { body_fragment_blocks: 3, key: test_key(id_0) },
        );

        try_reassemble_ip_packet(&mut core_ctx, &mut bindings_ctx, id_0, 3);

        // Process fragment #2 (the final fragment) for packet #1
        I::process_ip_fragment(
            &mut core_ctx,
            &mut bindings_ctx,
            FragmentSpec { id: id_1, offset: 2, size: SIZE, m_flag: false },
            ExpectedResult::Ready { body_fragment_blocks: 3, key: test_key(id_1) },
        );

        try_reassemble_ip_packet(&mut core_ctx, &mut bindings_ctx, id_1, 3);
    }
1977
    #[ip_test(I)]
    fn test_ip_reassembly_timer_with_multiple_intertwined_packets<I: TestIpExt>() {
        let FakeCtxImpl { mut core_ctx, mut bindings_ctx } = new_context::<I>();
        const SIZE: u16 = 1;
        let id_0 = 5;
        let id_1 = 10;
        let id_2 = 15;

        // Test that we properly handle reassembly timers with multiple
        // intertwined packets that all arrive out of order. We expect packets
        // #0 and #2 to succeed, and packet #1 to fail due to the reassembly
        // timer.
        //
        // The flow of events:
        //   T=0:
        //     - Packet #0, Fragment #0 arrives (timer scheduled for T=60s).
        //     - Packet #1, Fragment #2 arrives (timer scheduled for T=60s).
        //     - Packet #2, Fragment #2 arrives (timer scheduled for T=60s).
        //   T=BEFORE_TIMEOUT1:
        //     - Packet #0, Fragment #2 arrives.
        //   T=BEFORE_TIMEOUT2:
        //     - Packet #2, Fragment #1 arrives.
        //     - Packet #0, Fragment #1 arrives (timer cancelled since all
        //       fragments arrived).
        //   T=BEFORE_TIMEOUT3:
        //     - Packet #1, Fragment #0 arrives.
        //     - Packet #2, Fragment #0 arrives (timer cancelled since all
        //       fragments arrived).
        //   T=TIMEOUT:
        //     - Timeout for reassembly of Packet #1.
        //     - Packet #1, Fragment #2 arrives (the timer already purged
        //       packet #1's state, so reassembly must start over).

        const BEFORE_TIMEOUT1: Duration = Duration::from_secs(1);
        const BEFORE_TIMEOUT2: Duration = Duration::from_secs(2);
        const BEFORE_TIMEOUT3: Duration = Duration::from_secs(3);
        assert!(BEFORE_TIMEOUT1 < I::REASSEMBLY_TIMEOUT);
        assert!(BEFORE_TIMEOUT2 < I::REASSEMBLY_TIMEOUT);
        assert!(BEFORE_TIMEOUT3 < I::REASSEMBLY_TIMEOUT);

        // Process fragment #0 for packet #0
        I::process_ip_fragment(
            &mut core_ctx,
            &mut bindings_ctx,
            FragmentSpec { id: id_0, offset: 0, size: SIZE, m_flag: true },
            ExpectedResult::NeedMore,
        );

        // Process fragment #2 for packet #1
        I::process_ip_fragment(
            &mut core_ctx,
            &mut bindings_ctx,
            FragmentSpec { id: id_1, offset: 2, size: SIZE, m_flag: false },
            ExpectedResult::NeedMore,
        );

        // Process fragment #2 for packet #2
        I::process_ip_fragment(
            &mut core_ctx,
            &mut bindings_ctx,
            FragmentSpec { id: id_2, offset: 2, size: SIZE, m_flag: false },
            ExpectedResult::NeedMore,
        );

        // Advance time.
        assert_empty(
            bindings_ctx
                .trigger_timers_until_instant(FakeInstant::from(BEFORE_TIMEOUT1), &mut core_ctx),
        );

        // Process fragment #2 for packet #0
        I::process_ip_fragment(
            &mut core_ctx,
            &mut bindings_ctx,
            FragmentSpec { id: id_0, offset: 2, size: SIZE, m_flag: false },
            ExpectedResult::NeedMore,
        );

        // Advance time.
        assert_empty(
            bindings_ctx
                .trigger_timers_until_instant(FakeInstant::from(BEFORE_TIMEOUT2), &mut core_ctx),
        );

        // Process fragment #1 for packet #2
        I::process_ip_fragment(
            &mut core_ctx,
            &mut bindings_ctx,
            FragmentSpec { id: id_2, offset: 1, size: SIZE, m_flag: true },
            ExpectedResult::NeedMore,
        );

        // Process fragment #1 for packet #0, completing packet #0.
        I::process_ip_fragment(
            &mut core_ctx,
            &mut bindings_ctx,
            FragmentSpec { id: id_0, offset: 1, size: SIZE, m_flag: true },
            ExpectedResult::Ready { body_fragment_blocks: 3, key: test_key(id_0) },
        );

        try_reassemble_ip_packet(&mut core_ctx, &mut bindings_ctx, id_0, 3);

        // Advance time.
        assert_empty(
            bindings_ctx
                .trigger_timers_until_instant(FakeInstant::from(BEFORE_TIMEOUT3), &mut core_ctx),
        );

        // Process fragment #0 for packet #1
        I::process_ip_fragment(
            &mut core_ctx,
            &mut bindings_ctx,
            FragmentSpec { id: id_1, offset: 0, size: SIZE, m_flag: true },
            ExpectedResult::NeedMore,
        );

        // Process fragment #0 for packet #2, completing packet #2.
        I::process_ip_fragment(
            &mut core_ctx,
            &mut bindings_ctx,
            FragmentSpec { id: id_2, offset: 0, size: SIZE, m_flag: true },
            ExpectedResult::Ready { body_fragment_blocks: 3, key: test_key(id_2) },
        );

        try_reassemble_ip_packet(&mut core_ctx, &mut bindings_ctx, id_2, 3);

        // Advance time to the timeout, triggering the timer for the reassembly
        // of packet #1
        bindings_ctx.trigger_timers_until_and_expect_unordered(
            FakeInstant::from(I::REASSEMBLY_TIMEOUT),
            [FragmentTimerId::<I>::default()],
            &mut core_ctx,
        );

        // Make sure no other timers exist.
        bindings_ctx.timers.assert_no_timers_installed();

        // Process fragment #2 for packet #1. Should get a need-more return
        // value: even though we technically received all of packet #1's
        // fragments, the timer already purged its state before this one
        // arrived, so reassembly starts over.
        I::process_ip_fragment(
            &mut core_ctx,
            &mut bindings_ctx,
            FragmentSpec { id: id_1, offset: 2, size: SIZE, m_flag: true },
            ExpectedResult::NeedMore,
        );
    }
2124
2125    #[test]
2126    fn test_no_more_fragments_in_middle_of_block() {
2127        let FakeCtxImpl { mut core_ctx, mut bindings_ctx } = new_context::<Ipv4>();
2128        process_ipv4_fragment(
2129            &mut core_ctx,
2130            &mut bindings_ctx,
2131            FragmentSpec { id: 0, offset: 100, size: 1, m_flag: false },
2132            get_ipv4_builder(),
2133            ExpectedResult::NeedMore,
2134        );
2135
2136        process_ipv4_fragment(
2137            &mut core_ctx,
2138            &mut bindings_ctx,
2139            FragmentSpec { id: 0, offset: 50, size: 1, m_flag: false },
2140            get_ipv4_builder(),
2141            ExpectedResult::Invalid,
2142        );
2143    }
2144
2145    #[ip_test(I)]
2146    fn test_cancel_timer_on_overlap<I: TestIpExt>() {
2147        const FRAGMENT_ID: u16 = 1;
2148
2149        let FakeCtxImpl { mut core_ctx, mut bindings_ctx } = new_context::<I>();
2150
2151        let key = test_key(FRAGMENT_ID);
2152
2153        // Do this a couple times to make sure that new packets matching the
2154        // invalid packet's fragment cache key create a new entry.
2155        for _ in 0..=2 {
2156            I::process_ip_fragment(
2157                &mut core_ctx,
2158                &mut bindings_ctx,
2159                FragmentSpec { id: FRAGMENT_ID, offset: 0, size: 10, m_flag: true },
2160                ExpectedResult::NeedMore,
2161            );
2162            core_ctx
2163                .state
2164                .cache
2165                .timers
2166                .assert_timers_after(&mut bindings_ctx, [(key, (), I::REASSEMBLY_TIMEOUT)]);
2167
2168            I::process_ip_fragment(
2169                &mut core_ctx,
2170                &mut bindings_ctx,
2171                FragmentSpec { id: FRAGMENT_ID, offset: 5, size: 10, m_flag: true },
2172                ExpectedResult::Invalid,
2173            );
2174            assert_eq!(bindings_ctx.timers.timers(), [],);
2175        }
2176    }
2177}