use alloc::collections::{BTreeSet, BinaryHeap};
use alloc::vec::Vec;
use core::cmp::Ordering;
use core::fmt::Debug;
use core::hash::Hash;
use core::time::Duration;

use assert_matches::assert_matches;
use log::debug;
use net_types::ip::{GenericOverIp, Ip, IpAddr, IpVersionMarker, Ipv4, Ipv6};
use netstack3_base::{
    CoreTimerContext, HandleableTimer, InstantBindingsTypes, IpExt, LocalTimerHeap,
    TimerBindingsTypes, TimerContext,
};
use netstack3_hashmap::hash_map::{Entry, HashMap};
use packet::BufferViewMut;
use packet_formats::ip::{IpPacket, Ipv4Proto};
use packet_formats::ipv4::{Ipv4Header, Ipv4Packet};
use packet_formats::ipv6::Ipv6Packet;
use packet_formats::ipv6::ext_hdrs::Ipv6ExtensionHeaderData;
use zerocopy::{SplitByteSlice, SplitByteSliceMut};

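/// An [`IpExt`] extension trait providing the IP-version-specific pieces of
/// fragment reassembly: the reassembly timeout and the IP-specific portion of
/// the fragment cache key.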
pub trait ReassemblyIpExt: IpExt {
    /// The amount of time to wait for all fragments of a packet to arrive
    /// before the reassembly attempt is abandoned.
    const REASSEMBLY_TIMEOUT: Duration;

    /// The IP-version-specific portion of the fragment cache key.
    type FragmentCacheKeyPart: Copy + Clone + Debug + Hash + PartialEq + Eq;

    /// Extracts the IP-version-specific key part from the given packet.
    fn ip_specific_key_part<B: SplitByteSlice>(
        packet: &Self::Packet<B>,
    ) -> Self::FragmentCacheKeyPart;
}

impl ReassemblyIpExt for Ipv4 {
    const REASSEMBLY_TIMEOUT: Duration = Duration::from_secs(15);

    type FragmentCacheKeyPart = Ipv4Proto;

    fn ip_specific_key_part<B: SplitByteSlice>(
        packet: &Self::Packet<B>,
    ) -> Self::FragmentCacheKeyPart {
        IpPacket::proto(packet)
    }
}

impl ReassemblyIpExt for Ipv6 {
    const REASSEMBLY_TIMEOUT: Duration = Duration::from_secs(60);

    type FragmentCacheKeyPart = ();

    fn ip_specific_key_part<B: SplitByteSlice>(
        _packet: &Self::Packet<B>,
    ) -> Self::FragmentCacheKeyPart {
        ()
    }
}

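/// The size of a fragment block, in bytes: fragment offsets are expressed in
/// units of 8 octets.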
const FRAGMENT_BLOCK_SIZE: u8 = 8;

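/// The maximum value of the fragment offset field (a 13-bit field, so 8191).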
const MAX_FRAGMENT_BLOCKS: u16 = 8191;

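/// The maximum number of bytes of fragment data the cache may hold before new
/// fragments are rejected with [`FragmentProcessingState::OutOfMemory`].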
const MAX_FRAGMENT_CACHE_SIZE: usize = 4 * 1024 * 1024;

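/// The execution context for the fragment cache, providing access to the
/// cache state.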
pub trait FragmentContext<I: Ip, BT: FragmentBindingsTypes> {
    /// Calls the callback with a mutable reference to the fragment cache.
    fn with_state_mut<O, F: FnOnce(&mut IpPacketFragmentCache<I, BT>) -> O>(&mut self, cb: F) -> O;
}

/// The bindings types for IP packet fragment caching.
pub trait FragmentBindingsTypes: TimerBindingsTypes + InstantBindingsTypes {}
impl<BT> FragmentBindingsTypes for BT where BT: TimerBindingsTypes + InstantBindingsTypes {}

/// The bindings execution context for IP packet fragment caching.
pub trait FragmentBindingsContext: TimerContext + FragmentBindingsTypes {}
impl<BC> FragmentBindingsContext for BC where BC: TimerContext + FragmentBindingsTypes {}

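/// The timer ID for the IP packet fragment cache's reassembly timeouts.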
#[derive(Hash, Eq, PartialEq, Default, Clone, Debug, GenericOverIp)]
#[generic_over_ip(I, Ip)]
pub struct FragmentTimerId<I: Ip>(IpVersionMarker<I>);

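/// An implementation of IP packet fragment handling: accepting incoming
/// fragments and reassembling completed packets.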
pub trait FragmentHandler<I: ReassemblyIpExt, BC> {
    /// Attempts to process a packet fragment, returning the resulting state of
    /// the reassembly process for the packet it belongs to.
    fn process_fragment<B: SplitByteSlice>(
        &mut self,
        bindings_ctx: &mut BC,
        packet: I::Packet<B>,
    ) -> FragmentProcessingState<I, B>
    where
        I::Packet<B>: FragmentablePacket;

    /// Attempts to reassemble the packet identified by `key` into `buffer`,
    /// failing if reassembly is not yet possible or no packet with `key` is
    /// being reassembled.
    fn reassemble_packet<B: SplitByteSliceMut, BV: BufferViewMut<B>>(
        &mut self,
        bindings_ctx: &mut BC,
        key: &FragmentCacheKey<I>,
        buffer: BV,
    ) -> Result<(), FragmentReassemblyError>;
}

impl<I: IpExt + ReassemblyIpExt, BC: FragmentBindingsContext, CC: FragmentContext<I, BC>>
    FragmentHandler<I, BC> for CC
{
    fn process_fragment<B: SplitByteSlice>(
        &mut self,
        bindings_ctx: &mut BC,
        packet: I::Packet<B>,
    ) -> FragmentProcessingState<I, B>
    where
        I::Packet<B>: FragmentablePacket,
    {
        self.with_state_mut(|cache| {
            let (res, timer_action) = cache.process_fragment(packet);

            if let Some(timer_action) = timer_action {
                match timer_action {
                    CacheTimerAction::CreateNewTimer(key) => {
                        assert_eq!(
                            cache.timers.schedule_after(
                                bindings_ctx,
                                key,
                                (),
                                I::REASSEMBLY_TIMEOUT,
                            ),
                            None
                        )
                    }
                    CacheTimerAction::CancelExistingTimer(key) => {
                        assert_ne!(cache.timers.cancel(bindings_ctx, &key), None)
                    }
                }
            }

            res
        })
    }

    fn reassemble_packet<B: SplitByteSliceMut, BV: BufferViewMut<B>>(
        &mut self,
        bindings_ctx: &mut BC,
        key: &FragmentCacheKey<I>,
        buffer: BV,
    ) -> Result<(), FragmentReassemblyError> {
        self.with_state_mut(|cache| {
            let res = cache.reassemble_packet(key, buffer);

            match res {
                Ok(_) | Err(FragmentReassemblyError::PacketParsingError) => {
                    // Reassembly either succeeded or the packet could not be
                    // parsed; in both cases the cache entry has been removed,
                    // so cancel its reassembly timer.
                    assert_matches!(cache.timers.cancel(bindings_ctx, key), Some(_));
                }
                Err(FragmentReassemblyError::InvalidKey)
                | Err(FragmentReassemblyError::MissingFragments) => {}
            }

            res
        })
    }
}

impl<I: ReassemblyIpExt, BC: FragmentBindingsContext, CC: FragmentContext<I, BC>>
    HandleableTimer<CC, BC> for FragmentTimerId<I>
{
    fn handle(self, core_ctx: &mut CC, bindings_ctx: &mut BC, _: BC::UniqueTimerId) {
        let Self(IpVersionMarker { .. }) = self;
        core_ctx.with_state_mut(|cache| {
            let Some((key, ())) = cache.timers.pop(bindings_ctx) else {
                return;
            };

            let FragmentCacheData { missing_blocks: _, body_fragments, header: _, total_size } =
                assert_matches!(cache.remove_data(&key), Some(c) => c);
            debug!(
                "reassembly for {key:?} \
                timed out with {} fragments and {total_size} bytes",
                body_fragments.len(),
            );
        });
    }
}

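/// An IP packet that carries fragmentation data (a fragment identifier,
/// fragment offset, and more-fragments flag).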
pub trait FragmentablePacket {
    /// Returns the fragment identifier, fragment offset (in 8-octet blocks),
    /// and more-fragments flag for this packet.
    fn fragment_data(&self) -> (u32, u16, bool);
}

impl<B: SplitByteSlice> FragmentablePacket for Ipv4Packet<B> {
    fn fragment_data(&self) -> (u32, u16, bool) {
        (u32::from(self.id()), self.fragment_offset().into_raw(), self.mf_flag())
    }
}

impl<B: SplitByteSlice> FragmentablePacket for Ipv6Packet<B> {
    fn fragment_data(&self) -> (u32, u16, bool) {
        for ext_hdr in self.iter_extension_hdrs() {
            if let Ipv6ExtensionHeaderData::Fragment { fragment_data } = ext_hdr.data() {
                return (
                    fragment_data.identification(),
                    fragment_data.fragment_offset().into_raw(),
                    fragment_data.m_flag(),
                );
            }
        }

        unreachable!(
            "Should never call this function if the packet does not have a fragment header"
        );
    }
}

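/// The result of processing a single packet fragment.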
#[derive(Debug)]
pub enum FragmentProcessingState<I: ReassemblyIpExt, B: SplitByteSlice> {
    /// The packet is not fragmented, so no reassembly is needed.
    NotNeeded(I::Packet<B>),

    /// The fragment is invalid (e.g. it overlaps previously received data or
    /// its body is not a multiple of the fragment block size when more
    /// fragments are expected) and has been discarded.
    InvalidFragment,

    /// The fragment was accepted, but more fragments are needed before the
    /// packet can be reassembled.
    NeedMoreFragments,

    /// The fragment could not be cached because the cache is at capacity.
    OutOfMemory,

    /// All fragments have been received; the packet identified by `key` can
    /// now be reassembled into a buffer of `packet_len` bytes.
    Ready { key: FragmentCacheKey<I>, packet_len: usize },
}

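/// Possible errors when attempting to reassemble a packet.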
#[derive(Debug, PartialEq, Eq)]
pub enum FragmentReassemblyError {
    /// Not all fragments for the packet have been received yet.
    MissingFragments,

    /// No reassembly is in progress for the given key.
    InvalidKey,

    /// The reassembled packet failed to parse.
    PacketParsingError,
}

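/// The key identifying an in-progress reassembly in the fragment cache: the
/// source address, destination address, fragment identifier, and any
/// IP-version-specific fields (the protocol for IPv4).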
#[derive(Copy, Clone, Debug, Hash, PartialEq, Eq)]
pub struct FragmentCacheKey<I: ReassemblyIpExt> {
    src_ip: I::Addr,
    dst_ip: I::Addr,
    fragment_id: u32,
    ip_specific_fields: I::FragmentCacheKeyPart,
}

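/// An inclusive range of fragment blocks.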
#[derive(Copy, Clone, Debug, Eq, PartialEq, PartialOrd, Ord)]
struct BlockRange {
    start: u16,
    end: u16,
}

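/// The per-packet state kept while a packet's fragments are being collected.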
#[derive(Debug)]
struct FragmentCacheData {
    /// The fragment block ranges that have not been received yet. Reassembly
    /// is possible once this set is empty.
    missing_blocks: BTreeSet<BlockRange>,

    /// The received body fragments, ordered by fragment offset.
    body_fragments: BinaryHeap<PacketBodyFragment>,

    /// The header of the packet, taken from the fragment with offset 0.
    header: Option<Vec<u8>>,

    /// The total number of bytes (header plus bodies) cached for this packet.
    total_size: usize,
}

impl Default for FragmentCacheData {
    fn default() -> FragmentCacheData {
        FragmentCacheData {
            missing_blocks: core::iter::once(BlockRange { start: 0, end: u16::MAX }).collect(),
            body_fragments: BinaryHeap::new(),
            header: None,
            total_size: 0,
        }
    }
}

impl FragmentCacheData {
    /// Finds the missing-block gap that contains the given range, if any,
    /// otherwise classifying the range as overlapping, out of bounds, or a
    /// duplicate.
    fn find_gap(&self, BlockRange { start, end }: BlockRange) -> FindGapResult {
        let result = self.missing_blocks.iter().find_map(|gap| {
            // The gap fully contains the incoming range.
            if gap.start <= start && gap.end >= end {
                return Some(FindGapResult::Found { gap: *gap });
            }

            // The gap and the incoming range are disjoint.
            if gap.start > end || gap.end < start {
                return None;
            }

            // The incoming range partially overlaps the gap.
            return Some(FindGapResult::Overlap);
        });

        match result {
            Some(result) => result,
            None => {
                // No gap contains or overlaps the incoming range, so the
                // fragment either duplicates data that was already received or
                // lies beyond the end of a packet whose final fragment has
                // already arrived. Either way at least one fragment has been
                // received, so `body_fragments` is non-empty.
                let last = self.body_fragments.peek().unwrap();
                if last.offset < start {
                    FindGapResult::OutOfBounds
                } else {
                    FindGapResult::Duplicate
                }
            }
        }
    }
}

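/// The result of looking up an incoming fragment's block range in the set of
/// missing blocks.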
enum FindGapResult {
    /// A gap containing the incoming range was found.
    Found {
        gap: BlockRange,
    },
    /// The incoming range partially overlaps one or more gaps.
    Overlap,
    /// The incoming range lies beyond the end of a packet whose final
    /// fragment has already been received.
    OutOfBounds,
    /// The incoming range starts within data that has already been received,
    /// so the fragment is treated as a duplicate.
    Duplicate,
}

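/// A cache of packets currently undergoing reassembly, keyed by
/// [`FragmentCacheKey`], together with the total size of cached data and the
/// per-packet reassembly timers.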
#[derive(Debug)]
pub struct IpPacketFragmentCache<I: ReassemblyIpExt, BT: FragmentBindingsTypes> {
    cache: HashMap<FragmentCacheKey<I>, FragmentCacheData>,
    size: usize,
    threshold: usize,
    timers: LocalTimerHeap<FragmentCacheKey<I>, (), BT>,
}

impl<I: ReassemblyIpExt, BC: FragmentBindingsContext> IpPacketFragmentCache<I, BC> {
    /// Creates a new, empty cache with the default size threshold.
    pub fn new<CC: CoreTimerContext<FragmentTimerId<I>, BC>>(
        bindings_ctx: &mut BC,
    ) -> IpPacketFragmentCache<I, BC> {
        IpPacketFragmentCache {
            cache: HashMap::new(),
            size: 0,
            threshold: MAX_FRAGMENT_CACHE_SIZE,
            timers: LocalTimerHeap::new(bindings_ctx, CC::convert_timer(Default::default())),
        }
    }
}

enum CacheTimerAction<I: ReassemblyIpExt> {
    CreateNewTimer(FragmentCacheKey<I>),
    CancelExistingTimer(FragmentCacheKey<I>),
}

impl<I: ReassemblyIpExt, BT: FragmentBindingsTypes> IpPacketFragmentCache<I, BT> {
    /// Attempts to process a packet fragment, returning the new reassembly
    /// state along with any timer action the caller must perform.
    fn process_fragment<B: SplitByteSlice>(
        &mut self,
        packet: I::Packet<B>,
    ) -> (FragmentProcessingState<I, B>, Option<CacheTimerAction<I>>)
    where
        I::Packet<B>: FragmentablePacket,
    {
        if self.above_size_threshold() {
            return (FragmentProcessingState::OutOfMemory, None);
        }

        let (id, offset, m_flag) = packet.fragment_data();

        // A packet with offset 0 and no more-fragments flag is a complete,
        // unfragmented packet.
        if offset == 0 && !m_flag {
            return (FragmentProcessingState::NotNeeded(packet), None);
        }

        // A fragment with an empty body carries no data; wait for others.
        if packet.body().is_empty() {
            return (FragmentProcessingState::NeedMoreFragments, None);
        }

        // All fragments other than the last must have a body that is a
        // multiple of the fragment block size.
        if m_flag && (packet.body().len() % (FRAGMENT_BLOCK_SIZE as usize) != 0) {
            return (FragmentProcessingState::InvalidFragment, None);
        }

        let key = FragmentCacheKey {
            src_ip: packet.src_ip(),
            dst_ip: packet.dst_ip(),
            fragment_id: id,
            ip_specific_fields: I::ip_specific_key_part(&packet),
        };

        // The number of fragment blocks this fragment occupies, rounding up.
        let num_fragment_blocks = 1 + ((packet.body().len() - 1) / (FRAGMENT_BLOCK_SIZE as usize));
        assert!(num_fragment_blocks > 0);

        // The range of fragment blocks covered by this fragment; reject
        // fragments that would extend past the maximum offset.
        let fragment_blocks_range =
            if let Ok(offset_end) = u16::try_from((offset as usize) + num_fragment_blocks - 1) {
                if offset_end <= MAX_FRAGMENT_BLOCKS {
                    BlockRange { start: offset, end: offset_end }
                } else {
                    return (FragmentProcessingState::InvalidFragment, None);
                }
            } else {
                return (FragmentProcessingState::InvalidFragment, None);
            };

        let (fragment_data, timer_not_yet_scheduled) = self.get_or_create(key);

        let found_gap = match fragment_data.find_gap(fragment_blocks_range) {
            FindGapResult::Overlap | FindGapResult::OutOfBounds => {
                // Overlapping or out-of-bounds fragments invalidate the whole
                // reassembly; drop it and cancel any scheduled timer.
                assert_matches!(self.remove_data(&key), Some(_));

                return (
                    FragmentProcessingState::InvalidFragment,
                    (!timer_not_yet_scheduled)
                        .then_some(CacheTimerAction::CancelExistingTimer(key)),
                );
            }
            FindGapResult::Duplicate => {
                // Ignore duplicate fragments.
                return (FragmentProcessingState::NeedMoreFragments, None);
            }
            FindGapResult::Found { gap } => gap,
        };

        let timer_id = timer_not_yet_scheduled.then_some(CacheTimerAction::CreateNewTimer(key));

        // Remove the gap this fragment falls into; any parts of the gap it
        // does not cover are re-inserted below.
        assert!(fragment_data.missing_blocks.remove(&found_gap));

        // If the fragment does not reach the start of the gap, the blocks
        // before it are still missing.
        if found_gap.start < fragment_blocks_range.start {
            assert!(fragment_data.missing_blocks.insert(BlockRange {
                start: found_gap.start,
                end: fragment_blocks_range.start - 1
            }));
        }

        // If the fragment does not reach the end of the gap and more
        // fragments are expected, the blocks after it are still missing.
        if found_gap.end > fragment_blocks_range.end && m_flag {
            assert!(
                fragment_data.missing_blocks.insert(BlockRange {
                    start: fragment_blocks_range.end + 1,
                    end: found_gap.end
                })
            );
        } else if found_gap.end > fragment_blocks_range.end && !m_flag && found_gap.end < u16::MAX {
            // A final fragment that leaves a bounded gap before data that was
            // already received can never complete the packet, so it is
            // invalid.
            return (FragmentProcessingState::InvalidFragment, timer_id);
        } else {
            assert!(
                found_gap.end == fragment_blocks_range.end
                    || (!m_flag && found_gap.end == u16::MAX),
                "found_gap: {:?}, fragment_blocks_range: {:?} offset: {:?}, m_flag: {:?}",
                found_gap,
                fragment_blocks_range,
                offset,
                m_flag
            );
        }

        let mut added_bytes = 0;
        // The first fragment carries the header used for the reassembled
        // packet.
        if offset == 0 {
            assert_eq!(fragment_data.header, None);
            let header = get_header::<B, I>(&packet);
            added_bytes = header.len();
            fragment_data.header = Some(header);
        }

        let mut body = Vec::with_capacity(packet.body().len());
        body.extend_from_slice(packet.body());
        added_bytes += body.len();
        fragment_data.total_size += added_bytes;
        fragment_data.body_fragments.push(PacketBodyFragment::new(offset, body));

        // Reassembly is ready once no blocks are missing.
        let result = if fragment_data.missing_blocks.is_empty() {
            FragmentProcessingState::Ready { key, packet_len: fragment_data.total_size }
        } else {
            FragmentProcessingState::NeedMoreFragments
        };

        self.increment_size(added_bytes);
        (result, timer_id)
    }

    /// Attempts to reassemble the packet identified by `key` into `buffer`,
    /// removing the packet's entry from the cache on success or on a parse
    /// error.
    fn reassemble_packet<B: SplitByteSliceMut, BV: BufferViewMut<B>>(
        &mut self,
        key: &FragmentCacheKey<I>,
        buffer: BV,
    ) -> Result<(), FragmentReassemblyError> {
        let entry = match self.cache.entry(*key) {
            Entry::Occupied(entry) => entry,
            Entry::Vacant(_) => return Err(FragmentReassemblyError::InvalidKey),
        };

        if !entry.get().missing_blocks.is_empty() {
            return Err(FragmentReassemblyError::MissingFragments);
        }
        let (_key, data) = entry.remove_entry();
        self.size -= data.total_size;

        // The header is always present once all blocks have been received.
        assert_matches!(data.header, Some(_));

        // Reassemble the body fragments in order of their offsets.
        let body_fragments = data.body_fragments.into_sorted_vec().into_iter().map(|x| x.data);
        I::Packet::reassemble_fragmented_packet(buffer, data.header.unwrap(), body_fragments)
            .map_err(|_| FragmentReassemblyError::PacketParsingError)
    }

    /// Gets the cache entry for `key`, creating a new empty one if it does
    /// not exist. The returned boolean is true iff the entry was just created
    /// (and so no reassembly timer has been scheduled for it yet).
    fn get_or_create(&mut self, key: FragmentCacheKey<I>) -> (&mut FragmentCacheData, bool) {
        match self.cache.entry(key) {
            Entry::Occupied(e) => (e.into_mut(), false),
            Entry::Vacant(e) => {
                (e.insert(FragmentCacheData::default()), true)
            }
        }
    }

    fn above_size_threshold(&self) -> bool {
        self.size >= self.threshold
    }

    fn increment_size(&mut self, sz: usize) {
        assert!(!self.above_size_threshold());
        self.size += sz;
    }

    fn remove_data(&mut self, key: &FragmentCacheKey<I>) -> Option<FragmentCacheData> {
        let data = self.cache.remove(key)?;
        self.size -= data.total_size;
        Some(data)
    }
}

/// Gets the header bytes of `packet` to be used for the reassembled packet.
fn get_header<B: SplitByteSlice, I: IpExt>(packet: &I::Packet<B>) -> Vec<u8> {
    match packet.as_ip_addr_ref() {
        IpAddr::V4(packet) => packet.copy_header_bytes_for_fragment(),
        IpAddr::V6(packet) => {
            packet.copy_header_bytes_for_fragment()
        }
    }
}

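/// A received fragment body along with its fragment offset.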
#[derive(Debug, PartialEq, Eq)]
struct PacketBodyFragment {
    offset: u16,
    data: Vec<u8>,
}

impl PacketBodyFragment {
    fn new(offset: u16, data: Vec<u8>) -> Self {
        PacketBodyFragment { offset, data }
    }
}

// Fragments are ordered by their offset so that a sorted collection yields
// the body fragments in reassembly order.
impl PartialOrd for PacketBodyFragment {
    fn partial_cmp(&self, other: &PacketBodyFragment) -> Option<Ordering> {
        Some(self.cmp(other))
    }
}

impl Ord for PacketBodyFragment {
    fn cmp(&self, other: &Self) -> Ordering {
        self.offset.cmp(&other.offset)
    }
}

#[cfg(test)]
mod tests {
    use alloc::vec;

    use assert_matches::assert_matches;
    use ip_test_macro::ip_test;
    use net_declare::{net_ip_v4, net_ip_v6};
    use net_types::Witness;
    use net_types::ip::{Ipv4, Ipv4Addr, Ipv6, Ipv6Addr};
    use netstack3_base::testutil::{
        FakeBindingsCtx, FakeCoreCtx, FakeInstant, FakeTimerCtxExt, TEST_ADDRS_V4, TEST_ADDRS_V6,
        assert_empty,
    };
    use netstack3_base::{CtxPair, IntoCoreTimerCtx};
    use packet::{Buf, PacketBuilder, ParsablePacket, ParseBuffer, Serializer};
    use packet_formats::ip::{FragmentOffset, IpProto, Ipv6Proto};
    use packet_formats::ipv4::Ipv4PacketBuilder;
    use packet_formats::ipv6::{Ipv6PacketBuilder, Ipv6PacketBuilderWithFragmentHeader};
    use test_case::test_case;

    use super::*;

    struct FakeFragmentContext<I: ReassemblyIpExt, BT: FragmentBindingsTypes> {
        cache: IpPacketFragmentCache<I, BT>,
    }

    impl<I: ReassemblyIpExt, BC: FragmentBindingsContext> FakeFragmentContext<I, BC>
    where
        BC::DispatchId: From<FragmentTimerId<I>>,
    {
        fn new(bindings_ctx: &mut BC) -> Self {
            Self { cache: IpPacketFragmentCache::new::<IntoCoreTimerCtx>(bindings_ctx) }
        }
    }

    type FakeCtxImpl<I> = CtxPair<FakeCoreCtxImpl<I>, FakeBindingsCtxImpl<I>>;
    type FakeBindingsCtxImpl<I> = FakeBindingsCtx<FragmentTimerId<I>, (), (), ()>;
    type FakeCoreCtxImpl<I> = FakeCoreCtx<FakeFragmentContext<I, FakeBindingsCtxImpl<I>>, (), ()>;

    impl<I: ReassemblyIpExt> FragmentContext<I, FakeBindingsCtxImpl<I>> for FakeCoreCtxImpl<I> {
        fn with_state_mut<
            O,
            F: FnOnce(&mut IpPacketFragmentCache<I, FakeBindingsCtxImpl<I>>) -> O,
        >(
            &mut self,
            cb: F,
        ) -> O {
            cb(&mut self.state.cache)
        }
    }

    #[derive(PartialEq)]
    enum ExpectedResult<I: ReassemblyIpExt> {
        Ready { body_fragment_blocks: u16, key: FragmentCacheKey<I> },

        NeedMore,

        Invalid,

        OutOfMemory,
    }

    fn get_ipv4_builder() -> Ipv4PacketBuilder {
        Ipv4PacketBuilder::new(
            TEST_ADDRS_V4.remote_ip,
            TEST_ADDRS_V4.local_ip,
            10,
            <Ipv4 as TestIpExt>::PROTOCOL,
        )
    }

    fn get_ipv6_builder() -> Ipv6PacketBuilder {
        Ipv6PacketBuilder::new(
            TEST_ADDRS_V6.remote_ip,
            TEST_ADDRS_V6.local_ip,
            10,
            <Ipv6 as TestIpExt>::PROTOCOL,
        )
    }

    /// Asserts that the cache's tracked size matches the sum of the sizes of
    /// its entries.
    fn validate_size<I: ReassemblyIpExt, BT: FragmentBindingsTypes>(
        cache: &IpPacketFragmentCache<I, BT>,
    ) {
        let mut sz: usize = 0;

        for v in cache.cache.values() {
            sz += v.total_size;
        }

        assert_eq!(sz, cache.size);
    }

    /// Describes a fragment generated by the `process_ip*_fragment` helpers.
    struct FragmentSpec {
        /// The fragment identifier.
        id: u16,
        /// The fragment offset, in fragment blocks.
        offset: u16,
        /// The size of the fragment body, in fragment blocks.
        size: u16,
        /// The more-fragments flag.
        m_flag: bool,
    }

    fn expected_packet_size<I: TestIpExt>(num_fragment_blocks: u16) -> usize {
        usize::from(num_fragment_blocks) * usize::from(FRAGMENT_BLOCK_SIZE) + I::HEADER_LENGTH
    }

    /// Processes a single IPv4 fragment described by the given `FragmentSpec`
    /// and asserts that the result matches `expected_result`.
    fn process_ipv4_fragment<CC: FragmentContext<Ipv4, BC>, BC: FragmentBindingsContext>(
        core_ctx: &mut CC,
        bindings_ctx: &mut BC,
        FragmentSpec { id, offset, size, m_flag }: FragmentSpec,
        mut builder: Ipv4PacketBuilder,
        expected_result: ExpectedResult<Ipv4>,
    ) {
        builder.id(id);
        builder.fragment_offset(FragmentOffset::new(offset).unwrap());
        builder.mf_flag(m_flag);
        let body = generate_body_fragment(
            id,
            offset,
            usize::from(size) * usize::from(FRAGMENT_BLOCK_SIZE),
        );

        let mut buffer = builder.wrap_body(Buf::new(body, ..)).serialize_vec_outer().unwrap();
        let packet = buffer.parse::<Ipv4Packet<_>>().unwrap();

        let actual_result =
            FragmentHandler::process_fragment::<&[u8]>(core_ctx, bindings_ctx, packet);
        match expected_result {
            ExpectedResult::Ready { body_fragment_blocks, key: expected_key } => {
                let (key, packet_len) = assert_matches!(
                    actual_result,
                    FragmentProcessingState::Ready {key, packet_len} => (key, packet_len)
                );
                assert_eq!(key, expected_key);
                assert_eq!(packet_len, expected_packet_size::<Ipv4>(body_fragment_blocks));
            }
            ExpectedResult::NeedMore => {
                assert_matches!(actual_result, FragmentProcessingState::NeedMoreFragments);
            }
            ExpectedResult::Invalid => {
                assert_matches!(actual_result, FragmentProcessingState::InvalidFragment);
            }
            ExpectedResult::OutOfMemory => {
                assert_matches!(actual_result, FragmentProcessingState::OutOfMemory);
            }
        }
    }

    /// Processes a single IPv6 fragment described by the given `FragmentSpec`
    /// and asserts that the result matches `expected_result`.
    fn process_ipv6_fragment<CC: FragmentContext<Ipv6, BC>, BC: FragmentBindingsContext>(
        core_ctx: &mut CC,
        bindings_ctx: &mut BC,
        FragmentSpec { id, offset, size, m_flag }: FragmentSpec,
        builder: Ipv6PacketBuilder,
        expected_result: ExpectedResult<Ipv6>,
    ) {
        let builder = Ipv6PacketBuilderWithFragmentHeader::new(
            builder,
            FragmentOffset::new(offset).unwrap(),
            m_flag,
            id.into(),
        );

        let body = generate_body_fragment(
            id,
            offset,
            usize::from(size) * usize::from(FRAGMENT_BLOCK_SIZE),
        );

        let mut buffer = builder.wrap_body(Buf::new(body, ..)).serialize_vec_outer().unwrap();
        let packet = buffer.parse::<Ipv6Packet<_>>().unwrap();

        let actual_result =
            FragmentHandler::process_fragment::<&[u8]>(core_ctx, bindings_ctx, packet);
        match expected_result {
            ExpectedResult::Ready { body_fragment_blocks, key: expected_key } => {
                let (key, packet_len) = assert_matches!(
                    actual_result,
                    FragmentProcessingState::Ready {key, packet_len} => (key, packet_len)
                );
                assert_eq!(key, expected_key);
                assert_eq!(packet_len, expected_packet_size::<Ipv6>(body_fragment_blocks));
            }
            ExpectedResult::NeedMore => {
                assert_matches!(actual_result, FragmentProcessingState::NeedMoreFragments);
            }
            ExpectedResult::Invalid => {
                assert_matches!(actual_result, FragmentProcessingState::InvalidFragment);
            }
            ExpectedResult::OutOfMemory => {
                assert_matches!(actual_result, FragmentProcessingState::OutOfMemory);
            }
        }
    }

    trait TestIpExt: IpExt + netstack3_base::testutil::TestIpExt + ReassemblyIpExt {
        const HEADER_LENGTH: usize;

        const PROTOCOL: Self::Proto;

        fn process_ip_fragment<CC: FragmentContext<Self, BC>, BC: FragmentBindingsContext>(
            core_ctx: &mut CC,
            bindings_ctx: &mut BC,
            spec: FragmentSpec,
            expected_result: ExpectedResult<Self>,
        );
    }

    impl TestIpExt for Ipv4 {
        const HEADER_LENGTH: usize = packet_formats::ipv4::HDR_PREFIX_LEN;

        const PROTOCOL: Ipv4Proto = Ipv4Proto::Proto(IpProto::Tcp);

        fn process_ip_fragment<CC: FragmentContext<Self, BC>, BC: FragmentBindingsContext>(
            core_ctx: &mut CC,
            bindings_ctx: &mut BC,
            spec: FragmentSpec,
            expected_result: ExpectedResult<Ipv4>,
        ) {
            process_ipv4_fragment(core_ctx, bindings_ctx, spec, get_ipv4_builder(), expected_result)
        }
    }
    impl TestIpExt for Ipv6 {
        const HEADER_LENGTH: usize = packet_formats::ipv6::IPV6_FIXED_HDR_LEN;

        const PROTOCOL: Ipv6Proto = Ipv6Proto::Proto(IpProto::Tcp);

        fn process_ip_fragment<CC: FragmentContext<Self, BC>, BC: FragmentBindingsContext>(
            core_ctx: &mut CC,
            bindings_ctx: &mut BC,
            spec: FragmentSpec,
            expected_result: ExpectedResult<Ipv6>,
        ) {
            process_ipv6_fragment(core_ctx, bindings_ctx, spec, get_ipv6_builder(), expected_result)
        }
    }

    /// Reassembles the packet with the given fragment ID and asserts that the
    /// reassembled body matches the generated fragments.
    fn try_reassemble_ip_packet<
        I: TestIpExt + netstack3_base::IpExt,
        CC: FragmentContext<I, BC>,
        BC: FragmentBindingsContext,
    >(
        core_ctx: &mut CC,
        bindings_ctx: &mut BC,
        fragment_id: u16,
        body_fragment_blocks: u16,
    ) {
        let mut buffer: Vec<u8> = vec![
            0;
            usize::from(body_fragment_blocks)
                * usize::from(FRAGMENT_BLOCK_SIZE)
                + I::HEADER_LENGTH
        ];
        let mut buffer = &mut buffer[..];
        let key = test_key(fragment_id);

        FragmentHandler::reassemble_packet(core_ctx, bindings_ctx, &key, &mut buffer).unwrap();
        let packet = I::Packet::parse_mut(&mut buffer, ()).unwrap();

        let expected_body = generate_body_fragment(
            fragment_id,
            0,
            usize::from(body_fragment_blocks) * usize::from(FRAGMENT_BLOCK_SIZE),
        );
        assert_eq!(packet.body(), &expected_body[..]);
    }

    /// Generates the body of a fragment with the given ID and offset such
    /// that the bodies of consecutive fragments of the same packet
    /// concatenate into one continuous byte sequence.
    fn generate_body_fragment(fragment_id: u16, fragment_offset: u16, len: usize) -> Vec<u8> {
        let start = usize::from(fragment_id)
            + usize::from(fragment_offset) * usize::from(FRAGMENT_BLOCK_SIZE);
        (start..start + len).map(|byte| byte as u8).collect()
    }

    /// Returns the fragment cache key for packets generated by these tests
    /// with the given fragment ID.
    fn test_key<I: TestIpExt>(id: u16) -> FragmentCacheKey<I> {
        #[derive(GenericOverIp)]
        #[generic_over_ip(I, Ip)]
        struct Wrapper<I: ReassemblyIpExt>(I::FragmentCacheKeyPart);

        let Wrapper(ip_specific_fields) =
            I::map_ip_out((), |()| Wrapper(Ipv4::PROTOCOL), |()| Wrapper(()));

        FragmentCacheKey {
            src_ip: I::TEST_ADDRS.remote_ip.get(),
            dst_ip: I::TEST_ADDRS.local_ip.get(),
            fragment_id: id.into(),
            ip_specific_fields,
        }
    }

    fn new_context<I: ReassemblyIpExt>() -> FakeCtxImpl<I> {
        FakeCtxImpl::<I>::with_default_bindings_ctx(|bindings_ctx| {
            FakeCoreCtxImpl::with_state(FakeFragmentContext::new(bindings_ctx))
        })
    }

    #[test]
    fn test_ipv4_reassembly_not_needed() {
        let FakeCtxImpl { mut core_ctx, mut bindings_ctx } = new_context::<Ipv4>();

        // An IPv4 packet with offset 0 and the more-fragments flag unset does
        // not need reassembly.
        let builder = get_ipv4_builder();
        let body = [1, 2, 3, 4, 5];
        let mut buffer =
            builder.wrap_body(Buf::new(body.to_vec(), ..)).serialize_vec_outer().unwrap();
        let packet = buffer.parse::<Ipv4Packet<_>>().unwrap();
        assert_matches!(
            FragmentHandler::process_fragment::<&[u8]>(&mut core_ctx, &mut bindings_ctx, packet),
            FragmentProcessingState::NotNeeded(unfragmented) if unfragmented.body() == body
        );
    }

    #[test]
    #[should_panic(
        expected = "internal error: entered unreachable code: Should never call this function if the packet does not have a fragment header"
    )]
    fn test_ipv6_reassembly_not_needed() {
        let FakeCtxImpl { mut core_ctx, mut bindings_ctx } = new_context::<Ipv6>();

        // Processing an IPv6 packet with no fragment extension header panics,
        // since `process_fragment` must only be called on fragments.
        let builder = get_ipv6_builder();
        let mut buffer =
            builder.wrap_body(Buf::new(vec![1, 2, 3, 4, 5], ..)).serialize_vec_outer().unwrap();
        let packet = buffer.parse::<Ipv6Packet<_>>().unwrap();
        assert_matches!(
            FragmentHandler::process_fragment::<&[u8]>(&mut core_ctx, &mut bindings_ctx, packet),
            FragmentProcessingState::InvalidFragment
        );
    }

    #[ip_test(I)]
    #[test_case(1)]
    #[test_case(10)]
    #[test_case(100)]
    fn test_ip_reassembly<I: TestIpExt>(size: u16) {
        let FakeCtxImpl { mut core_ctx, mut bindings_ctx } = new_context::<I>();
        let id = 5;

        // Process fragment #0.
        I::process_ip_fragment(
            &mut core_ctx,
            &mut bindings_ctx,
            FragmentSpec { id, offset: 0, size, m_flag: true },
            ExpectedResult::NeedMore,
        );

        // Process fragment #1.
        I::process_ip_fragment(
            &mut core_ctx,
            &mut bindings_ctx,
            FragmentSpec { id, offset: size, size, m_flag: true },
            ExpectedResult::NeedMore,
        );

        // Process the final fragment, completing the packet.
        I::process_ip_fragment(
            &mut core_ctx,
            &mut bindings_ctx,
            FragmentSpec { id, offset: 2 * size, size, m_flag: false },
            ExpectedResult::Ready { body_fragment_blocks: 3 * size, key: test_key(id) },
        );

        try_reassemble_ip_packet(&mut core_ctx, &mut bindings_ctx, id, 3 * size);
    }

    #[test]
    fn test_ipv4_key_uniqueness() {
        let FakeCtxImpl { mut core_ctx, mut bindings_ctx } = new_context::<Ipv4>();

        const RIGHT_SRC: Ipv4Addr = net_ip_v4!("192.0.2.1");
        const WRONG_SRC: Ipv4Addr = net_ip_v4!("192.0.2.2");

        const RIGHT_DST: Ipv4Addr = net_ip_v4!("192.0.2.3");
        const WRONG_DST: Ipv4Addr = net_ip_v4!("192.0.2.4");

        const RIGHT_PROTO: Ipv4Proto = Ipv4Proto::Proto(IpProto::Tcp);
        const WRONG_PROTO: Ipv4Proto = Ipv4Proto::Proto(IpProto::Udp);

        const RIGHT_ID: u16 = 1;
        const WRONG_ID: u16 = 2;

        const TTL: u8 = 1;

        // Process the first fragment of the packet under test.
        process_ipv4_fragment(
            &mut core_ctx,
            &mut bindings_ctx,
            FragmentSpec { id: RIGHT_ID, offset: 0, size: 1, m_flag: true },
            Ipv4PacketBuilder::new(RIGHT_SRC, RIGHT_DST, TTL, RIGHT_PROTO),
            ExpectedResult::NeedMore,
        );

        // Fragments that differ in ID, source, destination, or protocol must
        // not complete the packet under test.
        for (id, src, dst, proto) in [
            (RIGHT_ID, RIGHT_SRC, RIGHT_DST, WRONG_PROTO),
            (RIGHT_ID, RIGHT_SRC, WRONG_DST, RIGHT_PROTO),
            (RIGHT_ID, WRONG_SRC, RIGHT_DST, RIGHT_PROTO),
            (WRONG_ID, RIGHT_SRC, RIGHT_DST, RIGHT_PROTO),
        ] {
            process_ipv4_fragment(
                &mut core_ctx,
                &mut bindings_ctx,
                FragmentSpec { id, offset: 1, size: 1, m_flag: false },
                Ipv4PacketBuilder::new(src, dst, TTL, proto),
                ExpectedResult::NeedMore,
            );
        }

        // Only the fragment that matches on all key fields completes the
        // packet.
        const KEY: FragmentCacheKey<Ipv4> = FragmentCacheKey {
            src_ip: RIGHT_SRC,
            dst_ip: RIGHT_DST,
            fragment_id: RIGHT_ID as u32,
            ip_specific_fields: RIGHT_PROTO,
        };
        process_ipv4_fragment(
            &mut core_ctx,
            &mut bindings_ctx,
            FragmentSpec { id: RIGHT_ID, offset: 1, size: 1, m_flag: false },
            Ipv4PacketBuilder::new(RIGHT_SRC, RIGHT_DST, TTL, RIGHT_PROTO),
            ExpectedResult::Ready { body_fragment_blocks: 2, key: KEY },
        );
        let mut buffer: Vec<u8> = vec![0; expected_packet_size::<Ipv4>(2)];
        let mut buffer = &mut buffer[..];
        FragmentHandler::reassemble_packet(&mut core_ctx, &mut bindings_ctx, &KEY, &mut buffer)
            .expect("reassembly should succeed");
        let _packet = Ipv4Packet::parse_mut(&mut buffer, ()).expect("parse should succeed");
    }

    #[test]
    fn test_ipv6_key_uniqueness() {
        let FakeCtxImpl { mut core_ctx, mut bindings_ctx } = new_context::<Ipv6>();

        const RIGHT_SRC: Ipv6Addr = net_ip_v6!("2001:0db8::1");
        const WRONG_SRC: Ipv6Addr = net_ip_v6!("2001:0db8::2");

        const RIGHT_DST: Ipv6Addr = net_ip_v6!("2001:0db8::3");
        const WRONG_DST: Ipv6Addr = net_ip_v6!("2001:0db8::4");

        const RIGHT_ID: u16 = 1;
        const WRONG_ID: u16 = 2;

        const TTL: u8 = 1;

        // Process the first fragment of the packet under test.
        process_ipv6_fragment(
            &mut core_ctx,
            &mut bindings_ctx,
            FragmentSpec { id: RIGHT_ID, offset: 0, size: 1, m_flag: true },
            Ipv6PacketBuilder::new(RIGHT_SRC, RIGHT_DST, TTL, Ipv6::PROTOCOL),
            ExpectedResult::NeedMore,
        );

        // Fragments that differ in ID, source, or destination must not
        // complete the packet under test.
        for (id, src, dst) in [
            (RIGHT_ID, RIGHT_SRC, WRONG_DST),
            (RIGHT_ID, WRONG_SRC, RIGHT_DST),
            (WRONG_ID, RIGHT_SRC, RIGHT_DST),
        ] {
            process_ipv6_fragment(
                &mut core_ctx,
                &mut bindings_ctx,
                FragmentSpec { id, offset: 1, size: 1, m_flag: false },
                Ipv6PacketBuilder::new(src, dst, TTL, Ipv6::PROTOCOL),
                ExpectedResult::NeedMore,
            );
        }

        // Only the fragment that matches on all key fields completes the
        // packet.
        const KEY: FragmentCacheKey<Ipv6> = FragmentCacheKey {
            src_ip: RIGHT_SRC,
            dst_ip: RIGHT_DST,
            fragment_id: RIGHT_ID as u32,
            ip_specific_fields: (),
        };
        process_ipv6_fragment(
            &mut core_ctx,
            &mut bindings_ctx,
            FragmentSpec { id: RIGHT_ID, offset: 1, size: 1, m_flag: false },
            Ipv6PacketBuilder::new(RIGHT_SRC, RIGHT_DST, TTL, Ipv6::PROTOCOL),
            ExpectedResult::Ready { body_fragment_blocks: 2, key: KEY },
        );
        let mut buffer: Vec<u8> = vec![0; expected_packet_size::<Ipv6>(2)];
        let mut buffer = &mut buffer[..];
        FragmentHandler::reassemble_packet(&mut core_ctx, &mut bindings_ctx, &KEY, &mut buffer)
            .expect("reassembly should succeed");
        let _packet = Ipv6Packet::parse_mut(&mut buffer, ()).expect("parse should succeed");
    }

    #[test]
    fn test_ipv6_reassemble_different_protocols() {
        let FakeCtxImpl { mut core_ctx, mut bindings_ctx } = new_context::<Ipv6>();

        const SRC: Ipv6Addr = net_ip_v6!("2001:0db8::1");
        const DST: Ipv6Addr = net_ip_v6!("2001:0db8::2");
        const ID: u16 = 1;
        const TTL: u8 = 1;

        const PROTO1: Ipv6Proto = Ipv6Proto::Proto(IpProto::Tcp);
        const PROTO2: Ipv6Proto = Ipv6Proto::Proto(IpProto::Udp);

        // Process the first fragment, which carries PROTO1.
        process_ipv6_fragment(
            &mut core_ctx,
            &mut bindings_ctx,
            FragmentSpec { id: ID, offset: 0, size: 1, m_flag: true },
            Ipv6PacketBuilder::new(SRC, DST, TTL, PROTO1),
            ExpectedResult::NeedMore,
        );

        // Unlike IPv4, the IPv6 fragment cache key does not include the
        // upper-layer protocol, so a fragment carrying PROTO2 still completes
        // the packet.
        const KEY: FragmentCacheKey<Ipv6> = FragmentCacheKey {
            src_ip: SRC,
            dst_ip: DST,
            fragment_id: ID as u32,
            ip_specific_fields: (),
        };
        process_ipv6_fragment(
            &mut core_ctx,
            &mut bindings_ctx,
            FragmentSpec { id: ID, offset: 1, size: 1, m_flag: false },
            Ipv6PacketBuilder::new(SRC, DST, TTL, PROTO2),
            ExpectedResult::Ready { body_fragment_blocks: 2, key: KEY },
        );
        let mut buffer: Vec<u8> = vec![0; expected_packet_size::<Ipv6>(2)];
        let mut buffer = &mut buffer[..];
        FragmentHandler::reassemble_packet(&mut core_ctx, &mut bindings_ctx, &KEY, &mut buffer)
            .expect("reassembly should succeed");
        let packet = Ipv6Packet::parse_mut(&mut buffer, ()).expect("parse should succeed");
        // The reassembled packet takes its protocol from the first fragment.
        assert_eq!(packet.proto(), PROTO1);
    }

    #[ip_test(I)]
    #[test_case(1)]
    #[test_case(10)]
    #[test_case(100)]
    fn test_ip_reassemble_with_missing_blocks<I: TestIpExt>(size: u16) {
        let FakeCtxImpl { mut core_ctx, mut bindings_ctx } = new_context::<I>();
        let id = 5;

        // Process fragment #0.
        I::process_ip_fragment(
            &mut core_ctx,
            &mut bindings_ctx,
            FragmentSpec { id, offset: 0, size, m_flag: true },
            ExpectedResult::NeedMore,
        );

        // Process fragment #1.
        I::process_ip_fragment(
            &mut core_ctx,
            &mut bindings_ctx,
            FragmentSpec { id, offset: size, size, m_flag: true },
            ExpectedResult::NeedMore,
        );

        // The final fragment never arrives, so reassembly must fail.
        let mut buffer: Vec<u8> = vec![0; 1];
        let mut buffer = &mut buffer[..];
        let key = test_key(id);
        assert_eq!(
            FragmentHandler::reassemble_packet(&mut core_ctx, &mut bindings_ctx, &key, &mut buffer)
                .unwrap_err(),
            FragmentReassemblyError::MissingFragments,
        );
    }

    #[ip_test(I)]
    fn test_ip_reassemble_after_timer<I: TestIpExt>() {
        let FakeCtxImpl { mut core_ctx, mut bindings_ctx } = new_context::<I>();
        let id = 5;
        let key = test_key::<I>(id);

        // The cache starts out empty with no timers installed.
        bindings_ctx.timers.assert_no_timers_installed();
        assert_eq!(core_ctx.state.cache.size, 0);

        // Process fragment #0; this schedules the reassembly timer.
        I::process_ip_fragment(
            &mut core_ctx,
            &mut bindings_ctx,
            FragmentSpec { id, offset: 0, size: 1, m_flag: true },
            ExpectedResult::NeedMore,
        );

        core_ctx.state.cache.timers.assert_timers([(
            key,
            (),
            FakeInstant::from(I::REASSEMBLY_TIMEOUT),
        )]);
        validate_size(&core_ctx.state.cache);

        // Process fragment #1.
        I::process_ip_fragment(
            &mut core_ctx,
            &mut bindings_ctx,
            FragmentSpec { id, offset: 1, size: 1, m_flag: true },
            ExpectedResult::NeedMore,
        );
        core_ctx.state.cache.timers.assert_timers([(
            key,
            (),
            FakeInstant::from(I::REASSEMBLY_TIMEOUT),
        )]);
        validate_size(&core_ctx.state.cache);

        // Process the final fragment.
        I::process_ip_fragment(
            &mut core_ctx,
            &mut bindings_ctx,
            FragmentSpec { id, offset: 2, size: 1, m_flag: false },
            ExpectedResult::Ready { body_fragment_blocks: 3, key: test_key(id) },
        );
        core_ctx.state.cache.timers.assert_timers([(
            key,
            (),
            FakeInstant::from(I::REASSEMBLY_TIMEOUT),
        )]);
        validate_size(&core_ctx.state.cache);

        // Let the reassembly timeout fire before reassembling.
        assert_eq!(
            bindings_ctx.trigger_next_timer(&mut core_ctx),
            Some(FragmentTimerId::<I>::default())
        );

        // The timeout purges the cache entry.
        bindings_ctx.timers.assert_no_timers_installed();
        assert_eq!(core_ctx.state.cache.size, 0);

        // Attempting to reassemble after the timeout fails with `InvalidKey`.
        let key = test_key(id);
        let packet_len = 44;
        let mut buffer: Vec<u8> = vec![0; packet_len];
        let mut buffer = &mut buffer[..];
        assert_eq!(
            FragmentHandler::reassemble_packet(&mut core_ctx, &mut bindings_ctx, &key, &mut buffer)
                .unwrap_err(),
            FragmentReassemblyError::InvalidKey,
        );
    }

    #[ip_test(I)]
    #[test_case(1)]
    #[test_case(10)]
    #[test_case(100)]
    fn test_ip_fragment_cache_oom<I: TestIpExt>(size: u16) {
        let FakeCtxImpl { mut core_ctx, mut bindings_ctx } = new_context::<I>();
        let mut id = 0;
        const THRESHOLD: usize = 8196usize;

        assert_eq!(core_ctx.state.cache.size, 0);
        core_ctx.state.cache.threshold = THRESHOLD;

        // Add fragments of unrelated packets until the cache size approaches
        // the threshold.
        while core_ctx.state.cache.size + usize::from(size) <= THRESHOLD {
            I::process_ip_fragment(
                &mut core_ctx,
                &mut bindings_ctx,
                FragmentSpec { id, offset: 0, size, m_flag: true },
                ExpectedResult::NeedMore,
            );
            validate_size(&core_ctx.state.cache);
            id += 1;
        }

        // Once the cache is full, further fragments are rejected.
        I::process_ip_fragment(
            &mut core_ctx,
            &mut bindings_ctx,
            FragmentSpec { id, offset: 0, size, m_flag: true },
            ExpectedResult::OutOfMemory,
        );
        validate_size(&core_ctx.state.cache);

        // Let the reassembly timers fire to purge the cache.
        let _timers = bindings_ctx
            .trigger_timers_for(I::REASSEMBLY_TIMEOUT + Duration::from_secs(1), &mut core_ctx);
        assert_eq!(core_ctx.state.cache.size, 0);
        validate_size(&core_ctx.state.cache);

        // With the cache empty again, fragments are accepted once more.
        I::process_ip_fragment(
            &mut core_ctx,
            &mut bindings_ctx,
            FragmentSpec { id, offset: 0, size, m_flag: true },
            ExpectedResult::NeedMore,
        );
    }

    #[ip_test(I)]
    #[test_case(1)]
    #[test_case(10)]
    #[test_case(100)]
    fn test_unordered_fragments<I: TestIpExt>(size: u16) {
        let FakeCtxImpl { mut core_ctx, mut bindings_ctx } = new_context::<I>();
        let id = 5;

        // Process fragment #0.
        I::process_ip_fragment(
            &mut core_ctx,
            &mut bindings_ctx,
            FragmentSpec { id, offset: 0, size, m_flag: true },
            ExpectedResult::NeedMore,
        );

        // Process the final fragment out of order.
        I::process_ip_fragment(
            &mut core_ctx,
            &mut bindings_ctx,
            FragmentSpec { id, offset: 2 * size, size, m_flag: false },
            ExpectedResult::NeedMore,
        );

        // The middle fragment fills the remaining gap.
        I::process_ip_fragment(
            &mut core_ctx,
            &mut bindings_ctx,
            FragmentSpec { id, offset: size, size, m_flag: true },
            ExpectedResult::Ready { body_fragment_blocks: 3 * size, key: test_key(id) },
        );
    }

    #[ip_test(I)]
    #[test_case(1)]
    #[test_case(10)]
    #[test_case(100)]
    fn test_ip_duplicate_fragment<I: TestIpExt>(size: u16) {
        let FakeCtxImpl { mut core_ctx, mut bindings_ctx } = new_context::<I>();
        let id = 5;

        // Process fragment #0.
        I::process_ip_fragment(
            &mut core_ctx,
            &mut bindings_ctx,
            FragmentSpec { id, offset: 0, size, m_flag: true },
            ExpectedResult::NeedMore,
        );

        // Process fragment #0 again; the duplicate is ignored.
        I::process_ip_fragment(
            &mut core_ctx,
            &mut bindings_ctx,
            FragmentSpec { id, offset: 0, size, m_flag: true },
            ExpectedResult::NeedMore,
        );

        // The final fragment completes the packet.
        I::process_ip_fragment(
            &mut core_ctx,
            &mut bindings_ctx,
            FragmentSpec { id, offset: size, size, m_flag: false },
            ExpectedResult::Ready { body_fragment_blocks: 2 * size, key: test_key(id) },
        );

        try_reassemble_ip_packet(&mut core_ctx, &mut bindings_ctx, id, 2 * size);
    }

    #[ip_test(I)]
    #[test_case(1)]
    #[test_case(10)]
    #[test_case(100)]
    fn test_ip_out_of_bounds_fragment<I: TestIpExt>(size: u16) {
        let FakeCtxImpl { mut core_ctx, mut bindings_ctx } = new_context::<I>();
        let id = 5;

        // Process the final fragment first.
        I::process_ip_fragment(
            &mut core_ctx,
            &mut bindings_ctx,
            FragmentSpec { id, offset: size, size, m_flag: false },
            ExpectedResult::NeedMore,
        );

        // A fragment that lies beyond the final fragment is invalid.
        I::process_ip_fragment(
            &mut core_ctx,
            &mut bindings_ctx,
            FragmentSpec { id, offset: 2 * size, size, m_flag: false },
            ExpectedResult::Invalid,
        );
    }

    #[ip_test(I)]
    #[test_case(50, 100; "overlaps_front")]
    #[test_case(150, 100; "overlaps_back")]
    #[test_case(50, 200; "overlaps_both")]
    fn test_ip_overlapping_fragment<I: TestIpExt>(offset: u16, size: u16) {
        let FakeCtxImpl { mut core_ctx, mut bindings_ctx } = new_context::<I>();
        let id = 5;

        // Process a fragment covering blocks 100..=199.
        I::process_ip_fragment(
            &mut core_ctx,
            &mut bindings_ctx,
            FragmentSpec { id, offset: 100, size: 100, m_flag: true },
            ExpectedResult::NeedMore,
        );

        // A fragment that partially overlaps the received range is invalid.
        I::process_ip_fragment(
            &mut core_ctx,
            &mut bindings_ctx,
            FragmentSpec { id, offset, size, m_flag: true },
            ExpectedResult::Invalid,
        );
    }

    #[test]
    fn test_ipv4_fragment_not_multiple_of_offset_unit() {
        let FakeCtxImpl { mut core_ctx, mut bindings_ctx } = new_context::<Ipv4>();
        let id = 0;

        assert_eq!(core_ctx.state.cache.size, 0);
        // Process fragment #0.
        process_ipv4_fragment(
            &mut core_ctx,
            &mut bindings_ctx,
            FragmentSpec { id, offset: 0, size: 1, m_flag: true },
            get_ipv4_builder(),
            ExpectedResult::NeedMore,
        );

        // A non-final fragment whose body is not a multiple of the fragment
        // block size is invalid.
        let mut builder = get_ipv4_builder();
        builder.id(id);
        builder.fragment_offset(FragmentOffset::new(1).unwrap());
        builder.mf_flag(true);
        let mut body: Vec<u8> = Vec::new();
        body.extend(FRAGMENT_BLOCK_SIZE..FRAGMENT_BLOCK_SIZE * 2 - 1);
        let mut buffer = builder.wrap_body(Buf::new(body, ..)).serialize_vec_outer().unwrap();
        let packet = buffer.parse::<Ipv4Packet<_>>().unwrap();
        assert_matches!(
            FragmentHandler::process_fragment::<&[u8]>(&mut core_ctx, &mut bindings_ctx, packet),
            FragmentProcessingState::InvalidFragment
        );

        // The same body is accepted when it is the final fragment, which is
        // allowed to be shorter than a full fragment block.
        let mut builder = get_ipv4_builder();
        builder.id(id);
        builder.fragment_offset(FragmentOffset::new(1).unwrap());
        builder.mf_flag(false);
        let mut body: Vec<u8> = Vec::new();
        body.extend(FRAGMENT_BLOCK_SIZE..FRAGMENT_BLOCK_SIZE * 2 - 1);
        let mut buffer = builder.wrap_body(Buf::new(body, ..)).serialize_vec_outer().unwrap();
        let packet = buffer.parse::<Ipv4Packet<_>>().unwrap();
        let (key, packet_len) = assert_matches!(
            FragmentHandler::process_fragment::<&[u8]>(&mut core_ctx, &mut bindings_ctx, packet),
            FragmentProcessingState::Ready {key, packet_len} => (key, packet_len)
        );
        assert_eq!(key, test_key(id));
        assert_eq!(packet_len, 35);
        validate_size(&core_ctx.state.cache);
        let mut buffer: Vec<u8> = vec![0; packet_len];
        let mut buffer = &mut buffer[..];
        FragmentHandler::reassemble_packet(&mut core_ctx, &mut bindings_ctx, &key, &mut buffer)
            .unwrap();
        let packet = Ipv4Packet::parse_mut(&mut buffer, ()).unwrap();
        let mut expected_body: Vec<u8> = Vec::new();
        expected_body.extend(0..15);
        assert_eq!(packet.body(), &expected_body[..]);
        assert_eq!(core_ctx.state.cache.size, 0);
    }

    #[test]
    fn test_ipv6_fragment_not_multiple_of_offset_unit() {
        let FakeCtxImpl { mut core_ctx, mut bindings_ctx } = new_context::<Ipv6>();
        let id = 0;

        assert_eq!(core_ctx.state.cache.size, 0);
        // Process fragment #0.
        process_ipv6_fragment(
            &mut core_ctx,
            &mut bindings_ctx,
            FragmentSpec { id, offset: 0, size: 1, m_flag: true },
            get_ipv6_builder(),
            ExpectedResult::NeedMore,
        );

        // A non-final fragment whose body is not a multiple of the fragment
        // block size is invalid.
        let offset = 1;
        let body_size: usize = (FRAGMENT_BLOCK_SIZE - 1).into();
        let builder = Ipv6PacketBuilderWithFragmentHeader::new(
            get_ipv6_builder(),
            FragmentOffset::new(offset).unwrap(),
            true,
            id.into(),
        );
        let body = generate_body_fragment(id, offset, body_size);
        let mut buffer = builder.wrap_body(Buf::new(body, ..)).serialize_vec_outer().unwrap();
        let packet = buffer.parse::<Ipv6Packet<_>>().unwrap();
        assert_matches!(
            FragmentHandler::process_fragment::<&[u8]>(&mut core_ctx, &mut bindings_ctx, packet),
            FragmentProcessingState::InvalidFragment
        );

        // The same body is accepted when it is the final fragment, which is
        // allowed to be shorter than a full fragment block.
        let builder = Ipv6PacketBuilderWithFragmentHeader::new(
            get_ipv6_builder(),
            FragmentOffset::new(offset).unwrap(),
            false,
            id.into(),
        );
        let body = generate_body_fragment(id, offset, body_size);
        let mut buffer = builder.wrap_body(Buf::new(body, ..)).serialize_vec_outer().unwrap();
        let packet = buffer.parse::<Ipv6Packet<_>>().unwrap();
        let (key, packet_len) = assert_matches!(
            FragmentHandler::process_fragment::<&[u8]>(&mut core_ctx, &mut bindings_ctx, packet),
            FragmentProcessingState::Ready {key, packet_len} => (key, packet_len)
        );
        assert_eq!(key, test_key(id));
        assert_eq!(packet_len, 55);

        validate_size(&core_ctx.state.cache);
        let mut buffer: Vec<u8> = vec![0; packet_len];
        let mut buffer = &mut buffer[..];
        FragmentHandler::reassemble_packet(&mut core_ctx, &mut bindings_ctx, &key, &mut buffer)
            .unwrap();
        let packet = Ipv6Packet::parse_mut(&mut buffer, ()).unwrap();
        let mut expected_body: Vec<u8> = Vec::new();
        expected_body.extend(0..15);
        assert_eq!(packet.body(), &expected_body[..]);
        assert_eq!(core_ctx.state.cache.size, 0);
    }

    #[ip_test(I)]
    fn test_ip_reassembly_with_multiple_intertwined_packets<I: TestIpExt>() {
        let FakeCtxImpl { mut core_ctx, mut bindings_ctx } = new_context::<I>();
        const SIZE: u16 = 1;
        let id_0 = 5;
        let id_1 = 10;

        // Interleave fragments of two different packets; each packet must be
        // tracked and reassembled independently.
        I::process_ip_fragment(
            &mut core_ctx,
            &mut bindings_ctx,
            FragmentSpec { id: id_0, offset: 0, size: SIZE, m_flag: true },
            ExpectedResult::NeedMore,
        );

        I::process_ip_fragment(
            &mut core_ctx,
            &mut bindings_ctx,
            FragmentSpec { id: id_1, offset: 0, size: SIZE, m_flag: true },
            ExpectedResult::NeedMore,
        );

        I::process_ip_fragment(
            &mut core_ctx,
            &mut bindings_ctx,
            FragmentSpec { id: id_0, offset: 1, size: SIZE, m_flag: true },
            ExpectedResult::NeedMore,
        );

        I::process_ip_fragment(
            &mut core_ctx,
            &mut bindings_ctx,
            FragmentSpec { id: id_1, offset: 1, size: SIZE, m_flag: true },
            ExpectedResult::NeedMore,
        );

        I::process_ip_fragment(
            &mut core_ctx,
            &mut bindings_ctx,
            FragmentSpec { id: id_0, offset: 2, size: SIZE, m_flag: false },
            ExpectedResult::Ready { body_fragment_blocks: 3, key: test_key(id_0) },
        );

        try_reassemble_ip_packet(&mut core_ctx, &mut bindings_ctx, id_0, 3);

        I::process_ip_fragment(
            &mut core_ctx,
            &mut bindings_ctx,
            FragmentSpec { id: id_1, offset: 2, size: SIZE, m_flag: false },
            ExpectedResult::Ready { body_fragment_blocks: 3, key: test_key(id_1) },
        );

        try_reassemble_ip_packet(&mut core_ctx, &mut bindings_ctx, id_1, 3);
    }

    #[ip_test(I)]
    fn test_ip_reassembly_timer_with_multiple_intertwined_packets<I: TestIpExt>() {
        let FakeCtxImpl { mut core_ctx, mut bindings_ctx } = new_context::<I>();
        const SIZE: u16 = 1;
        let id_0 = 5;
        let id_1 = 10;
        let id_2 = 15;

        // Fragments of three packets arrive interleaved over time. Packets
        // `id_0` and `id_2` complete before the reassembly timeout; the
        // remaining fragments of `id_1` never arrive, so its timer fires and
        // its partial data is discarded.
        const BEFORE_TIMEOUT1: Duration = Duration::from_secs(1);
        const BEFORE_TIMEOUT2: Duration = Duration::from_secs(2);
        const BEFORE_TIMEOUT3: Duration = Duration::from_secs(3);
        assert!(BEFORE_TIMEOUT1 < I::REASSEMBLY_TIMEOUT);
        assert!(BEFORE_TIMEOUT2 < I::REASSEMBLY_TIMEOUT);
        assert!(BEFORE_TIMEOUT3 < I::REASSEMBLY_TIMEOUT);

        I::process_ip_fragment(
            &mut core_ctx,
            &mut bindings_ctx,
            FragmentSpec { id: id_0, offset: 0, size: SIZE, m_flag: true },
            ExpectedResult::NeedMore,
        );

        I::process_ip_fragment(
            &mut core_ctx,
            &mut bindings_ctx,
            FragmentSpec { id: id_1, offset: 2, size: SIZE, m_flag: false },
            ExpectedResult::NeedMore,
        );

        I::process_ip_fragment(
            &mut core_ctx,
            &mut bindings_ctx,
            FragmentSpec { id: id_2, offset: 2, size: SIZE, m_flag: false },
            ExpectedResult::NeedMore,
        );

        // No reassembly timer fires before the timeout.
        assert_empty(
            bindings_ctx
                .trigger_timers_until_instant(FakeInstant::from(BEFORE_TIMEOUT1), &mut core_ctx),
        );

        I::process_ip_fragment(
            &mut core_ctx,
            &mut bindings_ctx,
            FragmentSpec { id: id_0, offset: 2, size: SIZE, m_flag: false },
            ExpectedResult::NeedMore,
        );

        assert_empty(
            bindings_ctx
                .trigger_timers_until_instant(FakeInstant::from(BEFORE_TIMEOUT2), &mut core_ctx),
        );

        I::process_ip_fragment(
            &mut core_ctx,
            &mut bindings_ctx,
            FragmentSpec { id: id_2, offset: 1, size: SIZE, m_flag: true },
            ExpectedResult::NeedMore,
        );

        I::process_ip_fragment(
            &mut core_ctx,
            &mut bindings_ctx,
            FragmentSpec { id: id_0, offset: 1, size: SIZE, m_flag: true },
            ExpectedResult::Ready { body_fragment_blocks: 3, key: test_key(id_0) },
        );

        try_reassemble_ip_packet(&mut core_ctx, &mut bindings_ctx, id_0, 3);

        assert_empty(
            bindings_ctx
                .trigger_timers_until_instant(FakeInstant::from(BEFORE_TIMEOUT3), &mut core_ctx),
        );

        I::process_ip_fragment(
            &mut core_ctx,
            &mut bindings_ctx,
            FragmentSpec { id: id_1, offset: 0, size: SIZE, m_flag: true },
            ExpectedResult::NeedMore,
        );

        I::process_ip_fragment(
            &mut core_ctx,
            &mut bindings_ctx,
            FragmentSpec { id: id_2, offset: 0, size: SIZE, m_flag: true },
            ExpectedResult::Ready { body_fragment_blocks: 3, key: test_key(id_2) },
        );

        try_reassemble_ip_packet(&mut core_ctx, &mut bindings_ctx, id_2, 3);

        // Only the timer for `id_1` remains; it fires at the timeout and
        // purges the partially reassembled packet.
        bindings_ctx.trigger_timers_until_and_expect_unordered(
            FakeInstant::from(I::REASSEMBLY_TIMEOUT),
            [FragmentTimerId::<I>::default()],
            &mut core_ctx,
        );

        bindings_ctx.timers.assert_no_timers_installed();

        // After the old entry was purged, fragments for `id_1` are treated as
        // a brand new packet.
        I::process_ip_fragment(
            &mut core_ctx,
            &mut bindings_ctx,
            FragmentSpec { id: id_1, offset: 2, size: SIZE, m_flag: true },
            ExpectedResult::NeedMore,
        );
    }

    #[test]
    fn test_no_more_fragments_in_middle_of_block() {
        let FakeCtxImpl { mut core_ctx, mut bindings_ctx } = new_context::<Ipv4>();
        process_ipv4_fragment(
            &mut core_ctx,
            &mut bindings_ctx,
            FragmentSpec { id: 0, offset: 100, size: 1, m_flag: false },
            get_ipv4_builder(),
            ExpectedResult::NeedMore,
        );

        // A second "final" fragment that ends before already-received data is
        // invalid.
        process_ipv4_fragment(
            &mut core_ctx,
            &mut bindings_ctx,
            FragmentSpec { id: 0, offset: 50, size: 1, m_flag: false },
            get_ipv4_builder(),
            ExpectedResult::Invalid,
        );
    }

    #[ip_test(I)]
    fn test_cancel_timer_on_overlap<I: TestIpExt>() {
        const FRAGMENT_ID: u16 = 1;

        let FakeCtxImpl { mut core_ctx, mut bindings_ctx } = new_context::<I>();

        let key = test_key(FRAGMENT_ID);

        // Repeat the sequence to verify that the timer is rescheduled and
        // cancelled correctly each time the cache entry is recreated.
        for _ in 0..=2 {
            I::process_ip_fragment(
                &mut core_ctx,
                &mut bindings_ctx,
                FragmentSpec { id: FRAGMENT_ID, offset: 0, size: 10, m_flag: true },
                ExpectedResult::NeedMore,
            );
            core_ctx
                .state
                .cache
                .timers
                .assert_timers_after(&mut bindings_ctx, [(key, (), I::REASSEMBLY_TIMEOUT)]);

            // An overlapping fragment invalidates the reassembly and cancels
            // the pending timer.
            I::process_ip_fragment(
                &mut core_ctx,
                &mut bindings_ctx,
                FragmentSpec { id: FRAGMENT_ID, offset: 5, size: 10, m_flag: true },
                ExpectedResult::Invalid,
            );
            assert_eq!(bindings_ctx.timers.timers(), [],);
        }
    }
}